--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
--- /dev/null
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
--- /dev/null
# Manifest for the `core` crate (the Rust core library).
[package]
authors = ["The Rust Project Developers"]
name = "core"
version = "0.0.0"
# Test and bench sources live in the explicit [[test]]/[[bench]] targets
# below, so turn off Cargo's auto-discovery.
autotests = false
autobenches = false
edition = "2018"

[lib]
# The library target itself is neither tested nor benched directly;
# the explicit targets below point at separate source trees.
test = false
bench = false

[[test]]
name = "coretests"
path = "tests/lib.rs"

[[bench]]
name = "corebenches"
path = "benches/lib.rs"
# Also build/run the bench harness under `cargo test` (one iteration each).
test = true

[dev-dependencies]
rand = "0.7"

[features]
# Make panics and failed asserts immediately abort without formatting any message
panic_immediate_abort = []
--- /dev/null
+use core::any::*;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_downcast_ref(b: &mut Bencher) {
+ b.iter(|| {
+ let mut x = 0;
+ let mut y = &mut x as &mut dyn Any;
+ black_box(&mut y);
+ black_box(y.downcast_ref::<isize>() == Some(&0));
+ });
+}
--- /dev/null
+mod is_ascii;
+
+// Lower-case ASCII 'a' is the first byte that has its highest bit set
+// after wrap-adding 0x1F:
+//
+// b'a' + 0x1F == 0x80 == 0b1000_0000
+// b'z' + 0x1F == 0x98 == 0b1001_1000
+//
+// Lower-case ASCII 'z' is the last byte that has its highest bit unset
+// after wrap-adding 0x05:
+//
+// b'a' + 0x05 == 0x66 == 0b0110_0110
+// b'z' + 0x05 == 0x7F == 0b0111_1111
+//
+// … except for 0xFB to 0xFF, but those are in the range of bytes
+// that have the highest bit unset again after adding 0x1F.
+//
+// So `(byte + 0x1f) & !(byte + 5)` has its highest bit set
+// iff `byte` is a lower-case ASCII letter.
+//
+// Lower-case ASCII letters all have the 0x20 bit set.
+// (Two positions right of 0x80, the highest bit.)
+// Unsetting that bit produces the same letter, in upper-case.
+//
+// Therefore:
/// Map a lower-case ASCII letter to its upper-case counterpart without
/// branching; every other byte value passes through unchanged.
/// (See the derivation in the comment above.)
fn branchless_to_ascii_upper_case(byte: u8) -> u8 {
    // High bit of `lower_mask` is set iff `byte` is in b'a'..=b'z';
    // shifting it right by two yields the 0x20 case bit, which is cleared.
    let lower_mask = byte.wrapping_add(0x1f) & !byte.wrapping_add(0x05) & 0x80;
    byte & !(lower_mask >> 2)
}
+
/// Generates three benchmark modules (`short`, `medium`, `long`), one per
/// input constant, each containing one `#[bench]` fn per case given.
/// The `@iter` tail additionally expands one case per `u8::is_*`
/// predicate name listed after it.
macro_rules! benches {
    ($( fn $name: ident($arg: ident: &mut [u8]) $body: block )+ @iter $( $is_: ident, )+) => {
        benches! {@
            $( fn $name($arg: &mut [u8]) $body )+
            $( fn $is_(bytes: &mut [u8]) { bytes.iter().all(u8::$is_) } )+
        }
    };

    (@$( fn $name: ident($arg: ident: &mut [u8]) $body: block )+) => {
        // One module per input size, so results group nicely in the output.
        benches!(mod short SHORT $($name $arg $body)+);
        benches!(mod medium MEDIUM $($name $arg $body)+);
        benches!(mod long LONG $($name $arg $body)+);
    };

    (mod $mod_name: ident $input: ident $($name: ident $arg: ident $body: block)+) => {
        mod $mod_name {
            use super::*;

            $(
                #[bench]
                fn $name(bencher: &mut Bencher) {
                    bencher.bytes = $input.len() as u64;
                    bencher.iter(|| {
                        // Allocate a fresh mutable copy of the input every
                        // iteration; `case00_alloc_only` measures this
                        // baseline cost.
                        let mut vec = $input.as_bytes().to_vec();
                        {
                            let $arg = &mut vec[..];
                            black_box($body);
                        }
                        vec
                    })
                }
            )+
        }
    }
}
+
+use test::black_box;
+use test::Bencher;
+
// Case studies for `make_ascii_uppercase`-style conversion, ranging from
// an allocation-only baseline (case00) through lookup tables, branchy and
// branchless scalar code, to pseudo-SIMD word-at-a-time variants.
benches! {
    fn case00_alloc_only(_bytes: &mut [u8]) {}

    fn case01_black_box_read_each_byte(bytes: &mut [u8]) {
        for byte in bytes {
            black_box(*byte);
        }
    }

    fn case02_lookup_table(bytes: &mut [u8]) {
        for byte in bytes {
            *byte = ASCII_UPPERCASE_MAP[*byte as usize]
        }
    }

    fn case03_branch_and_subtract(bytes: &mut [u8]) {
        for byte in bytes {
            *byte = if b'a' <= *byte && *byte <= b'z' {
                *byte - b'a' + b'A'
            } else {
                *byte
            }
        }
    }

    fn case04_branch_and_mask(bytes: &mut [u8]) {
        for byte in bytes {
            *byte = if b'a' <= *byte && *byte <= b'z' {
                *byte & !0x20
            } else {
                *byte
            }
        }
    }

    fn case05_branchless(bytes: &mut [u8]) {
        for byte in bytes {
            *byte = branchless_to_ascii_upper_case(*byte)
        }
    }

    fn case06_libcore(bytes: &mut [u8]) {
        bytes.make_ascii_uppercase()
    }

    fn case07_fake_simd_u32(bytes: &mut [u8]) {
        // SAFETY: transmuting a sequence of `u8` to `u32` is always fine
        let (before, aligned, after) = unsafe {
            bytes.align_to_mut::<u32>()
        };
        for byte in before {
            *byte = branchless_to_ascii_upper_case(*byte)
        }
        for word in aligned {
            // FIXME: this is incorrect for some byte values:
            // addition within a byte can carry/overflow into the next byte.
            // Test case: b"\xFFz "
            *word &= !(
                (
                    word.wrapping_add(0x1f1f1f1f) &
                    !word.wrapping_add(0x05050505) &
                    0x80808080
                ) >> 2
            )
        }
        for byte in after {
            *byte = branchless_to_ascii_upper_case(*byte)
        }
    }

    fn case08_fake_simd_u64(bytes: &mut [u8]) {
        // SAFETY: transmuting a sequence of `u8` to `u64` is always fine
        let (before, aligned, after) = unsafe {
            bytes.align_to_mut::<u64>()
        };
        for byte in before {
            *byte = branchless_to_ascii_upper_case(*byte)
        }
        for word in aligned {
            // FIXME: like above, this is incorrect for some byte values.
            *word &= !(
                (
                    word.wrapping_add(0x1f1f1f1f_1f1f1f1f) &
                    !word.wrapping_add(0x05050505_05050505) &
                    0x80808080_80808080
                ) >> 2
            )
        }
        for byte in after {
            *byte = branchless_to_ascii_upper_case(*byte)
        }
    }

    fn case09_mask_mult_bool_branchy_lookup_table(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            if b >= 0x80 { return false }
            match ASCII_CHARACTER_CLASS[b as usize] {
                L | Lx => true,
                _ => false,
            }
        }
        for byte in bytes {
            *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
        }
    }

    fn case10_mask_mult_bool_lookup_table(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            match ASCII_CHARACTER_CLASS[b as usize] {
                L | Lx => true,
                _ => false
            }
        }
        for byte in bytes {
            *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
        }
    }

    fn case11_mask_mult_bool_match_range(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            match b {
                b'a'..=b'z' => true,
                _ => false
            }
        }
        for byte in bytes {
            *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8))
        }
    }

    fn case12_mask_shifted_bool_match_range(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            match b {
                b'a'..=b'z' => true,
                _ => false
            }
        }
        for byte in bytes {
            *byte &= !((is_ascii_lowercase(*byte) as u8) << 5)
        }
    }

    fn case13_subtract_shifted_bool_match_range(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            match b {
                b'a'..=b'z' => true,
                _ => false
            }
        }
        for byte in bytes {
            *byte -= (is_ascii_lowercase(*byte) as u8) << 5
        }
    }

    fn case14_subtract_multiplied_bool_match_range(bytes: &mut [u8]) {
        fn is_ascii_lowercase(b: u8) -> bool {
            match b {
                b'a'..=b'z' => true,
                _ => false
            }
        }
        for byte in bytes {
            *byte -= (b'a' - b'A') * is_ascii_lowercase(*byte) as u8
        }
    }

    // Each name below expands (via the `@iter` macro arm) to a case that
    // runs `bytes.iter().all(u8::<name>)`.
    @iter

    is_ascii,
    is_ascii_alphabetic,
    is_ascii_uppercase,
    is_ascii_lowercase,
    is_ascii_alphanumeric,
    is_ascii_digit,
    is_ascii_hexdigit,
    is_ascii_punctuation,
    is_ascii_graphic,
    is_ascii_whitespace,
    is_ascii_control,
}
+
/// Concatenates ten copies of a string literal at compile time.
macro_rules! repeat {
    ($s: expr) => {
        concat!($s, $s, $s, $s, $s, $s, $s, $s, $s, $s)
    };
}

// Benchmark inputs of three sizes.
const SHORT: &str = "Alice's";
const MEDIUM: &str = "Alice's Adventures in Wonderland";
// A bibliography of Lewis Carroll works, repeated ten times.
// Note: it contains a non-ASCII en dash (–), so LONG is not all-ASCII.
const LONG: &str = repeat!(
    r#"
    La Guida di Bragia, a Ballad Opera for the Marionette Theatre (around 1850)
    Alice's Adventures in Wonderland (1865)
    Phantasmagoria and Other Poems (1869)
    Through the Looking-Glass, and What Alice Found There
    (includes "Jabberwocky" and "The Walrus and the Carpenter") (1871)
    The Hunting of the Snark (1876)
    Rhyme? And Reason? (1883) – shares some contents with the 1869 collection,
    including the long poem "Phantasmagoria"
    A Tangled Tale (1885)
    Sylvie and Bruno (1889)
    Sylvie and Bruno Concluded (1893)
    Pillow Problems (1893)
    What the Tortoise Said to Achilles (1895)
    Three Sunsets and Other Poems (1898)
    The Manlet (1903)[106]
"#
);
+
// `to_ascii_uppercase` as a 256-entry lookup table: the identity map,
// except that indices b'a'..=b'z' hold b'A'..=b'Z'.
#[rustfmt::skip]
const ASCII_UPPERCASE_MAP: [u8; 256] = [
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'',
    b'(', b')', b'*', b'+', b',', b'-', b'.', b'/',
    b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7',
    b'8', b'9', b':', b';', b'<', b'=', b'>', b'?',
    b'@', b'A', b'B', b'C', b'D', b'E', b'F', b'G',
    b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O',
    b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W',
    b'X', b'Y', b'Z', b'[', b'\\', b']', b'^', b'_',
    b'`',

    // Indices b'a'..=b'z' map to the corresponding upper-case letters:
    b'A', b'B', b'C', b'D', b'E', b'F', b'G',
    b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O',
    b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W',
    b'X', b'Y', b'Z',

    b'{', b'|', b'}', b'~', 0x7f,
    0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
    0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
    0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
    0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
    0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
    0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
    0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
    0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
    0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
    0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
    0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
    0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
    0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
    0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
    0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
    0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
];
+
/// Character class of a byte, used by the lookup-table benchmark cases
/// (case09/case10) via `ASCII_CHARACTER_CLASS` below.
enum AsciiCharacterClass {
    C, // control
    Cw, // control whitespace
    W, // whitespace
    D, // digit
    L, // lowercase
    Lx, // lowercase hex digit
    U, // uppercase
    Ux, // uppercase hex digit
    P, // punctuation
    N, // Non-ASCII
}
// Bring the variants into scope so the table below can use the short names.
use self::AsciiCharacterClass::*;
+
// Class of each of the 256 byte values: rows are indexed by the high
// nibble, columns by the low nibble (see the header row). Every value
// >= 0x80 is `N` (non-ASCII).
#[rustfmt::skip]
static ASCII_CHARACTER_CLASS: [AsciiCharacterClass; 256] = [
// _0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _a _b _c _d _e _f
    C, C, C, C, C, C, C, C, C, Cw,Cw,C, Cw,Cw,C, C, // 0_
    C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, // 1_
    W, P, P, P, P, P, P, P, P, P, P, P, P, P, P, P, // 2_
    D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, P, // 3_
    P, Ux,Ux,Ux,Ux,Ux,Ux,U, U, U, U, U, U, U, U, U, // 4_
    U, U, U, U, U, U, U, U, U, U, U, P, P, P, P, P, // 5_
    P, Lx,Lx,Lx,Lx,Lx,Lx,L, L, L, L, L, L, L, L, L, // 6_
    L, L, L, L, L, L, L, L, L, L, L, P, P, P, P, C, // 7_
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
    N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
];
--- /dev/null
+use super::{LONG, MEDIUM, SHORT};
+use test::black_box;
+use test::Bencher;
+
/// Expands one benchmark module per (input, subrange) combination, each
/// containing one `#[bench]` fn per case given. The subranges exercise
/// misaligned heads/tails and odd lengths.
macro_rules! benches {
    ($( fn $name: ident($arg: ident: &[u8]) $body: block )+) => {
        benches!(mod short SHORT[..] $($name $arg $body)+);
        benches!(mod medium MEDIUM[..] $($name $arg $body)+);
        benches!(mod long LONG[..] $($name $arg $body)+);
        // Ensure we benchmark cases where the functions are called with strings
        // that are not perfectly aligned or have a length which is not a
        // multiple of size_of::<usize>() (or both)
        benches!(mod unaligned_head MEDIUM[1..] $($name $arg $body)+);
        benches!(mod unaligned_tail MEDIUM[..(MEDIUM.len() - 1)] $($name $arg $body)+);
        benches!(mod unaligned_both MEDIUM[1..(MEDIUM.len() - 1)] $($name $arg $body)+);
    };

    (mod $mod_name: ident $input: ident [$range: expr] $($name: ident $arg: ident $body: block)+) => {
        mod $mod_name {
            use super::*;
            $(
                #[bench]
                fn $name(bencher: &mut Bencher) {
                    bencher.bytes = $input[$range].len() as u64;
                    let mut vec = $input.as_bytes().to_vec();
                    bencher.iter(|| {
                        // `black_box` the buffer so the check cannot be
                        // hoisted out of the benchmark loop.
                        let $arg: &[u8] = &black_box(&mut vec)[$range];
                        black_box($body)
                    })
                }
            )+
        }
    };
}
+
benches! {
    // The libcore implementation under test.
    fn case00_libcore(bytes: &[u8]) {
        bytes.is_ascii()
    }

    // Naive byte-at-a-time check.
    fn case01_iter_all(bytes: &[u8]) {
        bytes.iter().all(|b| b.is_ascii())
    }

    // Word-at-a-time check (defined below).
    fn case02_align_to(bytes: &[u8]) {
        is_ascii_align_to(bytes)
    }

    // Two-words-at-a-time check (defined below).
    fn case03_align_to_unrolled(bytes: &[u8]) {
        is_ascii_align_to_unrolled(bytes)
    }
}
+
+// These are separate since it's easier to debug errors if they don't go through
+// macro expansion first.
/// Checks that every byte is ASCII, processing the aligned middle of the
/// slice one `usize` word at a time; the unaligned head and tail are
/// checked per byte.
fn is_ascii_align_to(bytes: &[u8]) -> bool {
    if bytes.len() < core::mem::size_of::<usize>() {
        return bytes.iter().all(u8::is_ascii);
    }
    // One 0x80 per byte lane: a word is all-ASCII iff no lane has its
    // high bit set. Truncation to 32 bits on 32-bit targets is intended.
    const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    // SAFETY: transmuting a sequence of `u8` to `usize` is always fine
    let (head, body, tail) = unsafe { bytes.align_to::<usize>() };
    head.iter().all(u8::is_ascii)
        && body.iter().all(|&word| word & NONASCII_MASK == 0)
        && tail.iter().all(u8::is_ascii)
}
+
/// Like `is_ascii_align_to`, but the aligned middle is processed two
/// `usize` words at a time: the pair is OR-ed so a single mask test
/// covers both words.
fn is_ascii_align_to_unrolled(bytes: &[u8]) -> bool {
    if bytes.len() < core::mem::size_of::<usize>() {
        return bytes.iter().all(u8::is_ascii);
    }
    // One 0x80 per byte lane; truncation on 32-bit targets is intended.
    const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    // SAFETY: transmuting a sequence of `u8` to `[usize; 2]` is always fine
    let (head, body, tail) = unsafe { bytes.align_to::<[usize; 2]>() };
    head.iter().all(u8::is_ascii)
        && body.iter().all(|&[a, b]| (a | b) & NONASCII_MASK == 0)
        && tail.iter().all(u8::is_ascii)
}
+
/// Returns `true` if any byte lane of the `usize` word has its high bit
/// set, i.e. the word contains at least one non-ASCII byte.
#[inline]
fn contains_nonascii(v: usize) -> bool {
    // One 0x80 per byte lane; the `as usize` truncation to 0x80808080 on
    // 32-bit targets is intended.
    const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
    (v & NONASCII_MASK) != 0
}
--- /dev/null
+use test::Bencher;
+
+const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9'];
+const RADIX: [u32; 5] = [2, 8, 10, 16, 32];
+
+#[bench]
+fn bench_to_digit_radix_2(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_10(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_16(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_36(b: &mut Bencher) {
+ b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min())
+}
+
+#[bench]
+fn bench_to_digit_radix_var(b: &mut Bencher) {
+ b.iter(|| {
+ CHARS
+ .iter()
+ .cycle()
+ .zip(RADIX.iter().cycle())
+ .take(10_000)
+ .map(|(c, radix)| c.to_digit(*radix))
+ .min()
+ })
+}
--- /dev/null
+mod methods;
--- /dev/null
+use std::fmt::{self, Write as FmtWrite};
+use std::io::{self, Write as IoWrite};
+use test::Bencher;
+
+#[bench]
+fn write_vec_value(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ for _ in 0..1000 {
+ mem.write_all("abc".as_bytes()).unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_ref(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ wr.write_all("abc".as_bytes()).unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro1(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro2(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "{}", "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_vec_macro_debug(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = Vec::new();
+ let wr = &mut mem as &mut dyn io::Write;
+ for _ in 0..1000 {
+ write!(wr, "{:?}", "☃").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_value(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ for _ in 0..1000 {
+ mem.write_str("abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_ref(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ wr.write_str("abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro1(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ for _ in 0..1000 {
+ write!(mem, "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro2(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ write!(wr, "{}", "abc").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_str_macro_debug(bh: &mut Bencher) {
+ bh.iter(|| {
+ let mut mem = String::new();
+ let wr = &mut mem as &mut dyn fmt::Write;
+ for _ in 0..1000 {
+ write!(wr, "{:?}", "☃").unwrap();
+ }
+ });
+}
+
+#[bench]
+fn write_u128_max(bh: &mut Bencher) {
+ bh.iter(|| {
+ std::hint::black_box(format!("{}", u128::MAX));
+ });
+}
+
+#[bench]
+fn write_u128_min(bh: &mut Bencher) {
+ bh.iter(|| {
+ let s = format!("{}", 0u128);
+ std::hint::black_box(s);
+ });
+}
+
+#[bench]
+fn write_u64_max(bh: &mut Bencher) {
+ bh.iter(|| {
+ std::hint::black_box(format!("{}", u64::MAX));
+ });
+}
+
+#[bench]
+fn write_u64_min(bh: &mut Bencher) {
+ bh.iter(|| {
+ std::hint::black_box(format!("{}", 0u64));
+ });
+}
--- /dev/null
+#![allow(deprecated)]
+
+use core::hash::*;
+use test::{black_box, Bencher};
+
/// Feeds the raw bytes `x` into hasher `s` via `Hasher::write` and
/// returns the finished hash.
fn hash_bytes<H: Hasher>(mut s: H, x: &[u8]) -> u64 {
    s.write(x);
    s.finish()
}

/// Hashes `x` through its `Hash` impl using the given hasher state.
fn hash_with<H: Hasher, T: Hash>(mut st: H, x: &T) -> u64 {
    Hash::hash(x, &mut st);
    st.finish()
}

/// Hashes `x` with an unkeyed (deprecated) `SipHasher`.
fn hash<T: Hash>(x: &T) -> u64 {
    hash_with(SipHasher::new(), x)
}
+
+#[bench]
+fn bench_str_under_8_bytes(b: &mut Bencher) {
+ let s = "foo";
+ b.iter(|| {
+ assert_eq!(hash(&s), 16262950014981195938);
+ })
+}
+
+#[bench]
+fn bench_str_of_8_bytes(b: &mut Bencher) {
+ let s = "foobar78";
+ b.iter(|| {
+ assert_eq!(hash(&s), 4898293253460910787);
+ })
+}
+
+#[bench]
+fn bench_str_over_8_bytes(b: &mut Bencher) {
+ let s = "foobarbaz0";
+ b.iter(|| {
+ assert_eq!(hash(&s), 10581415515220175264);
+ })
+}
+
+#[bench]
+fn bench_long_str(b: &mut Bencher) {
+ let s = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
+ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
+ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute \
+ irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \
+ pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui \
+ officia deserunt mollit anim id est laborum.";
+ b.iter(|| {
+ assert_eq!(hash(&s), 17717065544121360093);
+ })
+}
+
+#[bench]
+fn bench_u32(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ b.iter(|| hash(&u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_u32_keyed(b: &mut Bencher) {
+ let u = 162629500u32;
+ let u = black_box(u);
+ let k1 = black_box(0x1);
+ let k2 = black_box(0x2);
+ b.iter(|| hash_with(SipHasher::new_with_keys(k1, k2), &u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_u64(b: &mut Bencher) {
+ let u = 16262950014981195938u64;
+ let u = black_box(u);
+ b.iter(|| hash(&u));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_4(b: &mut Bencher) {
+ let data = black_box([b' '; 4]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 4;
+}
+
+#[bench]
+fn bench_bytes_7(b: &mut Bencher) {
+ let data = black_box([b' '; 7]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 7;
+}
+
+#[bench]
+fn bench_bytes_8(b: &mut Bencher) {
+ let data = black_box([b' '; 8]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 8;
+}
+
+#[bench]
+fn bench_bytes_a_16(b: &mut Bencher) {
+ let data = black_box([b' '; 16]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 16;
+}
+
+#[bench]
+fn bench_bytes_b_32(b: &mut Bencher) {
+ let data = black_box([b' '; 32]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 32;
+}
+
+#[bench]
+fn bench_bytes_c_128(b: &mut Bencher) {
+ let data = black_box([b' '; 128]);
+ b.iter(|| hash_bytes(SipHasher::default(), &data));
+ b.bytes = 128;
+}
--- /dev/null
+use core::iter::*;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_rposition(b: &mut Bencher) {
+ let it: Vec<usize> = (0..300).collect();
+ b.iter(|| {
+ it.iter().rposition(|&x| x <= 150);
+ });
+}
+
+#[bench]
+fn bench_skip_while(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ let mut sum = 0;
+ it.skip_while(|&x| {
+ sum += x;
+ sum < 4000
+ })
+ .all(|_| true);
+ });
+}
+
+#[bench]
+fn bench_multiple_take(b: &mut Bencher) {
+ let mut it = (0..42).cycle();
+ b.iter(|| {
+ let n = it.next().unwrap();
+ for _ in 0..n {
+ it.clone().take(it.next().unwrap()).all(|_| true);
+ }
+ });
+}
+
/// Deterministically scrambles `x` into a residue modulo 127, so that
/// consecutive inputs produce non-monotonic key values.
fn scatter(x: i32) -> i32 {
    let scaled = x * 31;
    scaled % 127
}
+
+#[bench]
+fn bench_max_by_key(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ it.map(black_box).max_by_key(|&x| scatter(x))
+ })
+}
+
+// http://www.reddit.com/r/rust/comments/31syce/using_iterators_to_find_the_index_of_the_min_or/
+#[bench]
+fn bench_max_by_key2(b: &mut Bencher) {
+ fn max_index_iter(array: &[i32]) -> usize {
+ array.iter().enumerate().max_by_key(|&(_, item)| item).unwrap().0
+ }
+
+ let mut data = vec![0; 1638];
+ data[514] = 9999;
+
+ b.iter(|| max_index_iter(&data));
+}
+
+#[bench]
+fn bench_max(b: &mut Bencher) {
+ b.iter(|| {
+ let it = 0..100;
+ it.map(black_box).map(scatter).max()
+ })
+}
+
/// Copies `xs` into `ys` element-by-element through zipped iterators;
/// stops at the length of the shorter slice.
pub fn copy_zip(xs: &[u8], ys: &mut [u8]) {
    for (dst, src) in ys.iter_mut().zip(xs.iter()) {
        *dst = *src;
    }
}

/// Accumulates `xs` into `ys` element-by-element through zipped
/// iterators; stops at the length of the shorter slice.
pub fn add_zip(xs: &[f32], ys: &mut [f32]) {
    for (dst, src) in ys.iter_mut().zip(xs.iter()) {
        *dst += *src;
    }
}
+
+#[bench]
+fn bench_zip_copy(b: &mut Bencher) {
+ let source = vec![0u8; 16 * 1024];
+ let mut dst = black_box(vec![0u8; 16 * 1024]);
+ b.iter(|| copy_zip(&source, &mut dst))
+}
+
+#[bench]
+fn bench_zip_add(b: &mut Bencher) {
+ let source = vec![1.; 16 * 1024];
+ let mut dst = vec![0.; 16 * 1024];
+ b.iter(|| add_zip(&source, &mut dst));
+}
+
/// `Iterator::for_each` implemented as a plain loop (external iteration:
/// one `next()` call per element — this is what a `for` loop desugars to).
fn for_each_loop<I, F>(mut iter: I, mut f: F)
where
    I: Iterator,
    F: FnMut(I::Item),
{
    while let Some(item) = iter.next() {
        f(item);
    }
}

/// `Iterator::for_each` implemented with `fold` for internal iteration.
/// (except when `by_ref()` effectively disables that optimization.)
fn for_each_fold<I, F>(iter: I, mut f: F)
where
    I: Iterator,
    F: FnMut(I::Item),
{
    // Fold over the unit accumulator purely for the side effect of `f`.
    iter.fold((), move |(), item| {
        f(item);
    });
}
+
#[bench]
fn bench_for_each_chain_loop(b: &mut Bencher) {
    b.iter(|| {
        let mut acc = 0;
        // `Chain` is the interesting case: external iteration has to
        // re-check which half it is in on every `next()` call.
        let iter = (0i64..1000000).chain(0..1000000).map(black_box);
        for_each_loop(iter, |x| acc += x);
        acc
    });
}

#[bench]
fn bench_for_each_chain_fold(b: &mut Bencher) {
    b.iter(|| {
        let mut acc = 0;
        let iter = (0i64..1000000).chain(0..1000000).map(black_box);
        for_each_fold(iter, |x| acc += x);
        acc
    });
}

#[bench]
fn bench_for_each_chain_ref_fold(b: &mut Bencher) {
    b.iter(|| {
        let mut acc = 0;
        let mut iter = (0i64..1000000).chain(0..1000000).map(black_box);
        // `by_ref()` hides the concrete iterator type, which (per
        // `for_each_fold`'s docs) disables the fold optimization.
        for_each_fold(iter.by_ref(), |x| acc += x);
        acc
    });
}
+
/// Helper to benchmark `sum` for iterators taken by value which
/// can optimize `fold`, and by reference which cannot.
macro_rules! bench_sums {
    ($by_value:ident, $by_ref:ident, $it:expr) => {
        /// Sums the iterator taken by value (internal iteration possible).
        #[bench]
        fn $by_value(b: &mut Bencher) {
            b.iter(|| -> i64 { $it.map(black_box).sum() });
        }

        /// Sums the iterator through `by_ref()` (external iteration only).
        #[bench]
        fn $by_ref(b: &mut Bencher) {
            b.iter(|| -> i64 { $it.map(black_box).by_ref().sum() });
        }
    };
}
+
// Each invocation generates a by-value / by-ref `sum` benchmark pair (see
// `bench_sums!` above) for one iterator adapter; most adapters also get a
// variant layered over `chain`, whose `next()` is comparatively expensive.
bench_sums! {
    bench_flat_map_sum,
    bench_flat_map_ref_sum,
    (0i64..1000).flat_map(|x| x..x+1000)
}

bench_sums! {
    bench_flat_map_chain_sum,
    bench_flat_map_chain_ref_sum,
    (0i64..1000000).flat_map(|x| once(x).chain(once(x)))
}

bench_sums! {
    bench_enumerate_sum,
    bench_enumerate_ref_sum,
    (0i64..1000000).enumerate().map(|(i, x)| x * i as i64)
}

bench_sums! {
    bench_enumerate_chain_sum,
    bench_enumerate_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).enumerate().map(|(i, x)| x * i as i64)
}

bench_sums! {
    bench_filter_sum,
    bench_filter_ref_sum,
    (0i64..1000000).filter(|x| x % 3 == 0)
}

bench_sums! {
    bench_filter_chain_sum,
    bench_filter_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).filter(|x| x % 3 == 0)
}

bench_sums! {
    bench_filter_map_sum,
    bench_filter_map_ref_sum,
    (0i64..1000000).filter_map(|x| x.checked_mul(x))
}

bench_sums! {
    bench_filter_map_chain_sum,
    bench_filter_map_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).filter_map(|x| x.checked_mul(x))
}

bench_sums! {
    bench_fuse_sum,
    bench_fuse_ref_sum,
    (0i64..1000000).fuse()
}

bench_sums! {
    bench_fuse_chain_sum,
    bench_fuse_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).fuse()
}

bench_sums! {
    bench_inspect_sum,
    bench_inspect_ref_sum,
    (0i64..1000000).inspect(|_| {})
}

bench_sums! {
    bench_inspect_chain_sum,
    bench_inspect_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).inspect(|_| {})
}

bench_sums! {
    bench_peekable_sum,
    bench_peekable_ref_sum,
    (0i64..1000000).peekable()
}

bench_sums! {
    bench_peekable_chain_sum,
    bench_peekable_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).peekable()
}

bench_sums! {
    bench_skip_sum,
    bench_skip_ref_sum,
    (0i64..1000000).skip(1000)
}

bench_sums! {
    bench_skip_chain_sum,
    bench_skip_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).skip(1000)
}

bench_sums! {
    bench_skip_while_sum,
    bench_skip_while_ref_sum,
    (0i64..1000000).skip_while(|&x| x < 1000)
}

bench_sums! {
    bench_skip_while_chain_sum,
    bench_skip_while_chain_ref_sum,
    (0i64..1000000).chain(0..1000000).skip_while(|&x| x < 1000)
}

bench_sums! {
    bench_take_while_chain_sum,
    bench_take_while_chain_ref_sum,
    (0i64..1000000).chain(1000000..).take_while(|&x| x < 1111111)
}

bench_sums! {
    bench_cycle_take_sum,
    bench_cycle_take_ref_sum,
    (0i64..10000).cycle().take(1000000)
}
+
// Checks whether Skip<Zip<A,B>> is as fast as Zip<Skip<A>, Skip<B>>, from
// https://users.rust-lang.org/t/performance-difference-between-iterator-zip-and-skip-order/15743
#[bench]
fn bench_zip_then_skip(b: &mut Bencher) {
    let v: Vec<_> = (0..100_000).collect();
    let t: Vec<_> = (0..100_000).collect();

    b.iter(|| {
        let s = v
            .iter()
            .zip(t.iter())
            .skip(10000)
            .take_while(|t| *t.0 < 10100)
            .map(|(a, b)| *a + *b)
            .sum::<u64>();
        // The assert both checks the result and keeps the sum from being
        // optimized away.
        assert_eq!(s, 2009900);
    });
}
#[bench]
fn bench_skip_then_zip(b: &mut Bencher) {
    let v: Vec<_> = (0..100_000).collect();
    let t: Vec<_> = (0..100_000).collect();

    b.iter(|| {
        let s = v
            .iter()
            .skip(10000)
            .zip(t.iter().skip(10000))
            .take_while(|t| *t.0 < 10100)
            .map(|(a, b)| *a + *b)
            .sum::<u64>();
        assert_eq!(s, 2009900);
    });
}

#[bench]
fn bench_filter_count(b: &mut Bencher) {
    b.iter(|| (0i64..1000000).map(black_box).filter(|x| x % 3 == 0).count())
}

// By-ref variant; see `bench_sums!` above for why this iterates differently.
#[bench]
fn bench_filter_ref_count(b: &mut Bencher) {
    b.iter(|| (0i64..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count())
}

#[bench]
fn bench_filter_chain_count(b: &mut Bencher) {
    b.iter(|| (0i64..1000000).chain(0..1000000).map(black_box).filter(|x| x % 3 == 0).count())
}

#[bench]
fn bench_filter_chain_ref_count(b: &mut Bencher) {
    b.iter(|| {
        (0i64..1000000).chain(0..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count()
    })
}

// Lexicographic iterator comparisons over two equal opaque ranges.
#[bench]
fn bench_partial_cmp(b: &mut Bencher) {
    b.iter(|| (0..100000).map(black_box).partial_cmp((0..100000).map(black_box)))
}

#[bench]
fn bench_lt(b: &mut Bencher) {
    b.iter(|| (0..100000).map(black_box).lt((0..100000).map(black_box)))
}
--- /dev/null
+// wasm32 does not support benches (no time).
+#![cfg(not(target_arch = "wasm32"))]
+#![feature(flt2dec)]
+#![feature(test)]
+
+extern crate test;
+
+mod any;
+mod ascii;
+mod char;
+mod fmt;
+mod hash;
+mod iter;
+mod num;
+mod ops;
+mod pattern;
+mod slice;
--- /dev/null
+use test::Bencher;
+
// Each benchmark parses one representative `f64` literal, covering zero,
// small integers, huge integers, short and long decimals, and the extreme
// ends of the exponent range.

#[bench]
fn bench_0(b: &mut Bencher) {
    b.iter(|| "0.0".parse::<f64>());
}

#[bench]
fn bench_42(b: &mut Bencher) {
    b.iter(|| "42".parse::<f64>());
}

#[bench]
fn bench_huge_int(b: &mut Bencher) {
    // 2^128 - 1
    b.iter(|| "170141183460469231731687303715884105727".parse::<f64>());
}

#[bench]
fn bench_short_decimal(b: &mut Bencher) {
    b.iter(|| "1234.5678".parse::<f64>());
}

// Pi with more digits than an f64 can hold...
#[bench]
fn bench_pi_long(b: &mut Bencher) {
    b.iter(|| "3.14159265358979323846264338327950288".parse::<f64>());
}

// ...and with just the digits that fit.
#[bench]
fn bench_pi_short(b: &mut Bencher) {
    b.iter(|| "3.141592653589793".parse::<f64>())
}

#[bench]
fn bench_1e150(b: &mut Bencher) {
    b.iter(|| "1e150".parse::<f64>());
}

#[bench]
fn bench_long_decimal_and_exp(b: &mut Bencher) {
    b.iter(|| "727501488517303786137132964064381141071e-123".parse::<f64>());
}

#[bench]
fn bench_min_subnormal(b: &mut Bencher) {
    b.iter(|| "5e-324".parse::<f64>());
}

#[bench]
fn bench_min_normal(b: &mut Bencher) {
    b.iter(|| "2.2250738585072014e-308".parse::<f64>());
}

#[bench]
fn bench_max(b: &mut Bencher) {
    b.iter(|| "1.7976931348623157e308".parse::<f64>());
}
--- /dev/null
// Benchmarks for the two digit-generation strategies live in submodules.
mod strategy {
    mod dragon;
    mod grisu;
}
+
+use core::num::flt2dec::MAX_SIG_DIGITS;
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+use std::io::Write;
+use std::vec::Vec;
+use test::Bencher;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {:?} instead", full_decoded),
+ }
+}
+
// End-to-end shortest-form formatting through `Display`, for a small value
// and for `f64::MAX` (which needs a much larger buffer).

#[bench]
fn bench_small_shortest(b: &mut Bencher) {
    let mut buf = Vec::with_capacity(20);

    b.iter(|| {
        buf.clear();
        write!(&mut buf, "{}", 3.1415926f64).unwrap()
    });
}

#[bench]
fn bench_big_shortest(b: &mut Bencher) {
    let mut buf = Vec::with_capacity(300);

    b.iter(|| {
        buf.clear();
        write!(&mut buf, "{}", f64::MAX).unwrap()
    });
}
--- /dev/null
+use super::super::*;
+use core::num::flt2dec::strategy::dragon::*;
+use std::mem::MaybeUninit;
+use test::Bencher;
+
// Dragon-strategy digit generation: "shortest" mode, then "exact" mode
// with output buffers of 3, 12, and 1024 digits; "small" uses a pi-like
// value, "big" uses `f64::MAX`.

#[bench]
fn bench_small_shortest(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
    b.iter(|| {
        format_shortest(&decoded, &mut buf);
    });
}

#[bench]
fn bench_big_shortest(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
    b.iter(|| {
        format_shortest(&decoded, &mut buf);
    });
}

#[bench]
fn bench_small_exact_3(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 3];
    b.iter(|| {
        // `i16::MIN` as the limit leaves the buffer size as the only bound.
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_3(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 3];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_small_exact_12(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 12];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_12(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 12];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_small_exact_inf(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 1024];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_inf(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 1024];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}
--- /dev/null
+use super::super::*;
+use core::num::flt2dec::strategy::grisu::*;
+use std::mem::MaybeUninit;
+use test::Bencher;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {:?} instead", full_decoded),
+ }
+}
+
// Grisu-strategy digit generation; mirrors the dragon benchmarks:
// "shortest" mode, then "exact" mode with 3-, 12-, and 1024-digit buffers,
// each for a small pi-like value and for `f64::MAX`.

#[bench]
fn bench_small_shortest(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
    b.iter(|| {
        format_shortest(&decoded, &mut buf);
    });
}

#[bench]
fn bench_big_shortest(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); MAX_SIG_DIGITS];
    b.iter(|| {
        format_shortest(&decoded, &mut buf);
    });
}

#[bench]
fn bench_small_exact_3(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 3];
    b.iter(|| {
        // `i16::MIN` as the limit leaves the buffer size as the only bound.
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_3(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 3];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_small_exact_12(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 12];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_12(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 12];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_small_exact_inf(b: &mut Bencher) {
    let decoded = decode_finite(3.141592f64);
    let mut buf = [MaybeUninit::new(0); 1024];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}

#[bench]
fn bench_big_exact_inf(b: &mut Bencher) {
    let decoded = decode_finite(f64::MAX);
    let mut buf = [MaybeUninit::new(0); 1024];
    b.iter(|| {
        format_exact(&decoded, &mut buf, i16::MIN);
    });
}
--- /dev/null
+mod dec2flt;
+mod flt2dec;
+
+use std::str::FromStr;
+use test::Bencher;
+
// Inputs for the `from_str`/`from_str_radix` benchmarks below. The list
// deliberately mixes plain decimal numbers, negative numbers, and strings
// that parse only in some radixes (e.g. "abc", "c0ffee") or in none at
// all (e.g. "f##5s"), so both success and failure paths get measured.
const ASCII_NUMBERS: [&str; 19] = [
    "0",
    "1",
    "2",
    "43",
    "765",
    "76567",
    "987245987",
    "-4aa32",
    "1786235",
    "8723095",
    "f##5s",
    "83638730",
    "-2345",
    "562aa43",
    "-1",
    "-0",
    "abc",
    "xyz",
    "c0ffee",
];
+
/// Generates a benchmark that repeatedly parses `ASCII_NUMBERS` as `$t`
/// via `FromStr`, keeping the maximum successfully-parsed value so the
/// work cannot be optimized away.
macro_rules! from_str_bench {
    ($name:ident, $t:ty) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            b.iter(|| {
                ASCII_NUMBERS
                    .iter()
                    .cycle()
                    .take(5_000)
                    .filter_map(|s| <$t>::from_str(s).ok())
                    .max()
            })
        }
    };
}
+
/// Like `from_str_bench!`, but parses with `from_str_radix` at the given
/// `$radix` instead of `FromStr`.
macro_rules! from_str_radix_bench {
    ($name:ident, $t:ty, $radix:expr) => {
        #[bench]
        fn $name(b: &mut Bencher) {
            b.iter(|| {
                ASCII_NUMBERS
                    .iter()
                    .cycle()
                    .take(5_000)
                    .filter_map(|s| <$t>::from_str_radix(s, $radix).ok())
                    .max()
            })
        }
    };
}
+
// One `from_str` benchmark plus `from_str_radix` benchmarks at radixes
// 2/10/16/36, for each integer width, unsigned then signed.
from_str_bench!(bench_u8_from_str, u8);
from_str_radix_bench!(bench_u8_from_str_radix_2, u8, 2);
from_str_radix_bench!(bench_u8_from_str_radix_10, u8, 10);
from_str_radix_bench!(bench_u8_from_str_radix_16, u8, 16);
from_str_radix_bench!(bench_u8_from_str_radix_36, u8, 36);

from_str_bench!(bench_u16_from_str, u16);
from_str_radix_bench!(bench_u16_from_str_radix_2, u16, 2);
from_str_radix_bench!(bench_u16_from_str_radix_10, u16, 10);
from_str_radix_bench!(bench_u16_from_str_radix_16, u16, 16);
from_str_radix_bench!(bench_u16_from_str_radix_36, u16, 36);

from_str_bench!(bench_u32_from_str, u32);
from_str_radix_bench!(bench_u32_from_str_radix_2, u32, 2);
from_str_radix_bench!(bench_u32_from_str_radix_10, u32, 10);
from_str_radix_bench!(bench_u32_from_str_radix_16, u32, 16);
from_str_radix_bench!(bench_u32_from_str_radix_36, u32, 36);

from_str_bench!(bench_u64_from_str, u64);
from_str_radix_bench!(bench_u64_from_str_radix_2, u64, 2);
from_str_radix_bench!(bench_u64_from_str_radix_10, u64, 10);
from_str_radix_bench!(bench_u64_from_str_radix_16, u64, 16);
from_str_radix_bench!(bench_u64_from_str_radix_36, u64, 36);

from_str_bench!(bench_i8_from_str, i8);
from_str_radix_bench!(bench_i8_from_str_radix_2, i8, 2);
from_str_radix_bench!(bench_i8_from_str_radix_10, i8, 10);
from_str_radix_bench!(bench_i8_from_str_radix_16, i8, 16);
from_str_radix_bench!(bench_i8_from_str_radix_36, i8, 36);

from_str_bench!(bench_i16_from_str, i16);
from_str_radix_bench!(bench_i16_from_str_radix_2, i16, 2);
from_str_radix_bench!(bench_i16_from_str_radix_10, i16, 10);
from_str_radix_bench!(bench_i16_from_str_radix_16, i16, 16);
from_str_radix_bench!(bench_i16_from_str_radix_36, i16, 36);

from_str_bench!(bench_i32_from_str, i32);
from_str_radix_bench!(bench_i32_from_str_radix_2, i32, 2);
from_str_radix_bench!(bench_i32_from_str_radix_10, i32, 10);
from_str_radix_bench!(bench_i32_from_str_radix_16, i32, 16);
from_str_radix_bench!(bench_i32_from_str_radix_36, i32, 36);

from_str_bench!(bench_i64_from_str, i64);
from_str_radix_bench!(bench_i64_from_str_radix_2, i64, 2);
from_str_radix_bench!(bench_i64_from_str_radix_10, i64, 10);
from_str_radix_bench!(bench_i64_from_str_radix_16, i64, 16);
from_str_radix_bench!(bench_i64_from_str_radix_36, i64, 36);
--- /dev/null
+use core::ops::*;
+use test::Bencher;
+
// Overhead of dtors

/// A type whose only purpose is carrying a (trivial) `Drop` impl.
struct HasDtor {
    _x: isize,
}

impl Drop for HasDtor {
    fn drop(&mut self) {}
}

#[bench]
fn alloc_obj_with_dtor(b: &mut Bencher) {
    b.iter(|| {
        // Construct and immediately discard the value, so its destructor
        // runs at the end of the statement.
        HasDtor { _x: 10 };
    })
}
--- /dev/null
+use test::black_box;
+use test::Bencher;
+
// Compare a single-`char` pattern against the equivalent one-character
// `&str` pattern for `starts_with`/`ends_with`. The 1024-iteration inner
// loop amortizes `Bencher` overhead.

#[bench]
fn starts_with_char(b: &mut Bencher) {
    let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
    b.iter(|| {
        for _ in 0..1024 {
            black_box(text.starts_with('k'));
        }
    })
}

#[bench]
fn starts_with_str(b: &mut Bencher) {
    let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
    b.iter(|| {
        for _ in 0..1024 {
            black_box(text.starts_with("k"));
        }
    })
}

#[bench]
fn ends_with_char(b: &mut Bencher) {
    let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
    b.iter(|| {
        for _ in 0..1024 {
            black_box(text.ends_with('k'));
        }
    })
}

#[bench]
fn ends_with_str(b: &mut Bencher) {
    let text = black_box("kdjsfhlakfhlsghlkvcnljknfqiunvcijqenwodind");
    b.iter(|| {
        for _ in 0..1024 {
            black_box(text.ends_with("k"));
        }
    })
}
--- /dev/null
+use test::black_box;
+use test::Bencher;
+
/// Selects the haystack size for the binary-search benchmarks; each
/// variant corresponds to a working set sized for one CPU cache level
/// (see the `size` match in `binary_search` below).
enum Cache {
    L1,
    L2,
    L3,
}
+
/// Shared driver: builds a sorted `Vec` of `size` elements via `mapper`
/// and binary-searches it for pseudo-random keys.
fn binary_search<F>(b: &mut Bencher, cache: Cache, mapper: F)
where
    F: Fn(usize) -> usize,
{
    // Element counts chosen so the `Vec<usize>` roughly fills each cache
    // level (8 bytes per element on 64-bit targets).
    let size = match cache {
        Cache::L1 => 1000,      // 8kb
        Cache::L2 => 10_000,    // 80kb
        Cache::L3 => 1_000_000, // 8Mb
    };
    let v = (0..size).map(&mapper).collect::<Vec<_>>();
    let mut r = 0usize;
    b.iter(move || {
        // LCG constants from https://en.wikipedia.org/wiki/Numerical_Recipes.
        r = r.wrapping_mul(1664525).wrapping_add(1013904223);
        // Lookup the whole range to get 50% hits and 50% misses.
        let i = mapper(r % size);
        black_box(v.binary_search(&i).is_ok());
    })
}
+
// `|i| i * 2` leaves every other value absent (~50% misses); the
// `_with_dups` variants use `|i| i / 16 * 16`, producing runs of 16
// equal elements.

#[bench]
fn binary_search_l1(b: &mut Bencher) {
    binary_search(b, Cache::L1, |i| i * 2);
}

#[bench]
fn binary_search_l2(b: &mut Bencher) {
    binary_search(b, Cache::L2, |i| i * 2);
}

#[bench]
fn binary_search_l3(b: &mut Bencher) {
    binary_search(b, Cache::L3, |i| i * 2);
}

#[bench]
fn binary_search_l1_with_dups(b: &mut Bencher) {
    binary_search(b, Cache::L1, |i| i / 16 * 16);
}

#[bench]
fn binary_search_l2_with_dups(b: &mut Bencher) {
    binary_search(b, Cache::L2, |i| i / 16 * 16);
}

#[bench]
fn binary_search_l3_with_dups(b: &mut Bencher) {
    binary_search(b, Cache::L3, |i| i / 16 * 16);
}
+
// Generates a benchmark that rotates an `$n`-element vector by every
// possible shift amount, with elements produced by `$mapper`.
macro_rules! rotate {
    ($fn:ident, $n:expr, $mapper:expr) => {
        #[bench]
        fn $fn(b: &mut Bencher) {
            let mut x = (0usize..$n).map(&$mapper).collect::<Vec<_>>();
            b.iter(|| {
                for s in 0..x.len() {
                    x[..].rotate_right(s);
                }
                // Return an element so the rotations cannot be optimized out.
                black_box(x[0].clone())
            })
        }
    };
}
+
/// A three-byte element type, so rotation is also measured on elements
/// whose size is not a power of two.
#[derive(Clone)]
struct Rgb(u8, u8, u8);

// Rotations over a range of element sizes (1, 3, 8, 32, and 40 bytes on
// 64-bit targets) and lengths 16/32/64.
rotate!(rotate_u8, 32, |i| i as u8);
rotate!(rotate_rgb, 32, |i| Rgb(i as u8, (i as u8).wrapping_add(7), (i as u8).wrapping_add(42)));
rotate!(rotate_usize, 32, |i| i);
rotate!(rotate_16_usize_4, 16, |i| [i; 4]);
rotate!(rotate_16_usize_5, 16, |i| [i; 5]);
rotate!(rotate_64_usize_4, 64, |i| [i; 4]);
rotate!(rotate_64_usize_5, 64, |i| [i; 5]);
--- /dev/null
+use crate::alloc::Layout;
+use crate::cmp;
+use crate::ptr;
+
/// A memory allocator that can be registered as the standard library’s default
/// through the `#[global_allocator]` attribute.
///
/// Some of the methods require that a memory block be *currently
/// allocated* via an allocator. This means that:
///
/// * the starting address for that memory block was previously
///   returned by a previous call to an allocation method
///   such as `alloc`, and
///
/// * the memory block has not been subsequently deallocated, where
///   blocks are deallocated either by being passed to a deallocation
///   method such as `dealloc` or by being
///   passed to a reallocation method that returns a non-null pointer.
///
/// # Example
///
/// ```no_run
/// use std::alloc::{GlobalAlloc, Layout, alloc};
/// use std::ptr::null_mut;
///
/// struct MyAllocator;
///
/// unsafe impl GlobalAlloc for MyAllocator {
///     unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { null_mut() }
///     unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
/// }
///
/// #[global_allocator]
/// static A: MyAllocator = MyAllocator;
///
/// fn main() {
///     unsafe {
///         assert!(alloc(Layout::new::<u32>()).is_null())
///     }
/// }
/// ```
///
/// # Safety
///
/// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and
/// implementors must ensure that they adhere to these contracts:
///
/// * It's undefined behavior if global allocators unwind. This restriction may
///   be lifted in the future, but currently a panic from any of these
///   functions may lead to memory unsafety.
///
/// * `Layout` queries and calculations in general must be correct. Callers of
///   this trait are allowed to rely on the contracts defined on each method,
///   and implementors must ensure such contracts remain true.
#[stable(feature = "global_alloc", since = "1.28.0")]
pub unsafe trait GlobalAlloc {
    /// Allocate memory as described by the given `layout`.
    ///
    /// Returns a pointer to newly-allocated memory,
    /// or null to indicate allocation failure.
    ///
    /// # Safety
    ///
    /// This function is unsafe because undefined behavior can result
    /// if the caller does not ensure that `layout` has non-zero size.
    ///
    /// (Extension subtraits might provide more specific bounds on
    /// behavior, e.g., guarantee a sentinel address or a null pointer
    /// in response to a zero-size allocation request.)
    ///
    /// The allocated block of memory may or may not be initialized.
    ///
    /// # Errors
    ///
    /// Returning a null pointer indicates that either memory is exhausted
    /// or `layout` does not meet this allocator's size or alignment constraints.
    ///
    /// Implementations are encouraged to return null on memory
    /// exhaustion rather than aborting, but this is not
    /// a strict requirement. (Specifically: it is *legal* to
    /// implement this trait atop an underlying native allocation
    /// library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an
    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
    /// rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    #[stable(feature = "global_alloc", since = "1.28.0")]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8;

    /// Deallocate the block of memory at the given `ptr` pointer with the given `layout`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because undefined behavior can result
    /// if the caller does not ensure all of the following:
    ///
    /// * `ptr` must denote a block of memory currently allocated via
    ///   this allocator,
    ///
    /// * `layout` must be the same layout that was used
    ///   to allocate that block of memory.
    #[stable(feature = "global_alloc", since = "1.28.0")]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout);

    /// Behaves like `alloc`, but also ensures that the contents
    /// are set to zero before being returned.
    ///
    /// # Safety
    ///
    /// This function is unsafe for the same reasons that `alloc` is.
    /// However the allocated block of memory is guaranteed to be initialized.
    ///
    /// # Errors
    ///
    /// Returning a null pointer indicates that either memory is exhausted
    /// or `layout` does not meet allocator's size or alignment constraints,
    /// just as in `alloc`.
    ///
    /// Clients wishing to abort computation in response to an
    /// allocation error are encouraged to call the [`handle_alloc_error`] function,
    /// rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    #[stable(feature = "global_alloc", since = "1.28.0")]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        // Default implementation: plain `alloc` followed by an explicit
        // zeroing pass. Implementors with a cheaper source of zeroed
        // memory (e.g. fresh OS pages) can override this.
        let size = layout.size();
        // SAFETY: the safety contract for `alloc` must be upheld by the caller.
        let ptr = unsafe { self.alloc(layout) };
        if !ptr.is_null() {
            // SAFETY: as allocation succeeded, the region from `ptr`
            // of size `size` is guaranteed to be valid for writes.
            unsafe { ptr::write_bytes(ptr, 0, size) };
        }
        ptr
    }

    /// Shrink or grow a block of memory to the given `new_size`.
    /// The block is described by the given `ptr` pointer and `layout`.
    ///
    /// If this returns a non-null pointer, then ownership of the memory block
    /// referenced by `ptr` has been transferred to this allocator.
    /// The memory may or may not have been deallocated,
    /// and should be considered unusable (unless of course it was
    /// transferred back to the caller again via the return value of
    /// this method). The new memory block is allocated with `layout`, but
    /// with the `size` updated to `new_size`.
    ///
    /// If this method returns null, then ownership of the memory
    /// block has not been transferred to this allocator, and the
    /// contents of the memory block are unaltered.
    ///
    /// # Safety
    ///
    /// This function is unsafe because undefined behavior can result
    /// if the caller does not ensure all of the following:
    ///
    /// * `ptr` must be currently allocated via this allocator,
    ///
    /// * `layout` must be the same layout that was used
    ///   to allocate that block of memory,
    ///
    /// * `new_size` must be greater than zero.
    ///
    /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
    ///   must not overflow (i.e., the rounded value must be less than or
    ///   equal to `usize::MAX`).
    ///
    /// (Extension subtraits might provide more specific bounds on
    /// behavior, e.g., guarantee a sentinel address or a null pointer
    /// in response to a zero-size allocation request.)
    ///
    /// # Errors
    ///
    /// Returns null if the new layout does not meet the size
    /// and alignment constraints of the allocator, or if reallocation
    /// otherwise fails.
    ///
    /// Implementations are encouraged to return null on memory
    /// exhaustion rather than panicking or aborting, but this is not
    /// a strict requirement. (Specifically: it is *legal* to
    /// implement this trait atop an underlying native allocation
    /// library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to a
    /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
    /// rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    #[stable(feature = "global_alloc", since = "1.28.0")]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        // Default implementation: allocate-copy-deallocate. Implementors
        // that can resize in place can override this.
        // SAFETY: the caller must ensure that the `new_size` does not overflow.
        // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
        let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
        // SAFETY: the caller must ensure that `new_layout` is greater than zero.
        let new_ptr = unsafe { self.alloc(new_layout) };
        if !new_ptr.is_null() {
            // SAFETY: the previously allocated block cannot overlap the newly allocated block.
            // The safety contract for `dealloc` must be upheld by the caller.
            unsafe {
                ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
                self.dealloc(ptr, layout);
            }
        }
        new_ptr
    }
}
--- /dev/null
+use crate::cmp;
+use crate::fmt;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr::NonNull;
+
/// Returns the size and alignment of `T` as a `(size, align)` pair.
const fn size_align<T>() -> (usize, usize) {
    let size = mem::size_of::<T>();
    let align = mem::align_of::<T>();
    (size, align)
}
+
/// Layout of a block of memory.
///
/// An instance of `Layout` describes a particular layout of memory.
/// You build a `Layout` up as an input to give to an allocator.
///
/// All layouts have an associated size and a power-of-two alignment.
///
/// (Note that layouts are *not* required to have non-zero size,
/// even though `GlobalAlloc` requires that all memory requests
/// be non-zero in size. A caller must either ensure that conditions
/// like this are met, use specific allocators with looser
/// requirements, or use the more lenient `AllocRef` interface.)
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[lang = "alloc_layout"]
pub struct Layout {
    // Size of the requested block of memory, measured in bytes.
    size_: usize,

    // Alignment of the requested block of memory, measured in bytes.
    // We ensure that this is always a power-of-two, because APIs
    // like `posix_memalign` require it and it is a reasonable
    // constraint to impose on Layout constructors.
    //
    // (However, we do not analogously require `align >= sizeof(void*)`,
    // even though that is *also* a requirement of `posix_memalign`.)
    align_: NonZeroUsize,
}
+
+impl Layout {
    /// Constructs a `Layout` from a given `size` and `align`,
    /// or returns `LayoutErr` if any of the following conditions
    /// are not met:
    ///
    /// * `align` must not be zero,
    ///
    /// * `align` must be a power of two,
    ///
    /// * `size`, when rounded up to the nearest multiple of `align`,
    ///    must not overflow (i.e., the rounded value must be less than
    ///    or equal to `usize::MAX`).
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
    #[inline]
    pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutErr> {
        // `is_power_of_two()` is false for zero, so this single check
        // covers both the "non-zero" and "power of two" conditions.
        if !align.is_power_of_two() {
            return Err(LayoutErr { private: () });
        }

        // (power-of-two implies align != 0.)

        // Rounded up size is:
        //   size_rounded_up = (size + align - 1) & !(align - 1);
        //
        // We know from above that align != 0. If adding (align - 1)
        // does not overflow, then rounding up will be fine.
        //
        // Conversely, &-masking with !(align - 1) will subtract off
        // only low-order-bits. Thus if overflow occurs with the sum,
        // the &-mask cannot subtract enough to undo that overflow.
        //
        // Above implies that checking for summation overflow is both
        // necessary and sufficient.
        if size > usize::MAX - (align - 1) {
            return Err(LayoutErr { private: () });
        }

        // SAFETY: the conditions for `from_size_align_unchecked` have been
        // checked above.
        unsafe { Ok(Layout::from_size_align_unchecked(size, align)) }
    }

    /// Creates a layout, bypassing all checks.
    ///
    /// # Safety
    ///
    /// This function is unsafe as it does not verify the preconditions from
    /// [`Layout::from_size_align`]: `align` must be a non-zero power of two,
    /// and `size` must not overflow when rounded up to a multiple of `align`.
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[rustc_const_stable(feature = "alloc_layout", since = "1.28.0")]
    #[inline]
    pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
        // SAFETY: the caller must ensure that `align` is greater than zero.
        Layout { size_: size, align_: unsafe { NonZeroUsize::new_unchecked(align) } }
    }
+
    /// The minimum size in bytes for a memory block of this layout.
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
    #[inline]
    pub const fn size(&self) -> usize {
        self.size_
    }

    /// The minimum byte alignment for a memory block of this layout.
    ///
    /// This is always a non-zero power of two, by construction.
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
    #[inline]
    pub const fn align(&self) -> usize {
        self.align_.get()
    }

    /// Constructs a `Layout` suitable for holding a value of type `T`.
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
    #[inline]
    pub const fn new<T>() -> Self {
        let (size, align) = size_align::<T>();
        // SAFETY: the align is guaranteed by Rust to be a power of two and
        // the size+align combo is guaranteed to fit in our address space. As a
        // result use the unchecked constructor here to avoid inserting code
        // that panics if it isn't optimized well enough.
        unsafe { Layout::from_size_align_unchecked(size, align) }
    }
+
    /// Produces layout describing a record that could be used to
    /// allocate backing structure for `T` (which could be a trait
    /// or other unsized type like a slice).
    #[stable(feature = "alloc_layout", since = "1.28.0")]
    #[inline]
    pub fn for_value<T: ?Sized>(t: &T) -> Self {
        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
        // Validated in debug builds only; release builds rely on the
        // language-level guarantees cited below.
        debug_assert!(Layout::from_size_align(size, align).is_ok());
        // SAFETY: see rationale in `new` for why this is using the unsafe variant
        unsafe { Layout::from_size_align_unchecked(size, align) }
    }

    /// Produces layout describing a record that could be used to
    /// allocate backing structure for `T` (which could be a trait
    /// or other unsized type like a slice).
    ///
    /// # Safety
    ///
    /// This function is only safe to call if the following conditions hold:
    ///
    /// - If `T` is `Sized`, this function is always safe to call.
    /// - If the unsized tail of `T` is:
    ///     - a [slice], then the length of the slice tail must be an initialized
    ///       integer, and the size of the *entire value*
    ///       (dynamic tail length + statically sized prefix) must fit in `isize`.
    ///     - a [trait object], then the vtable part of the pointer must point
    ///       to a valid vtable for the type `T` acquired by an unsizing coercion,
    ///       and the size of the *entire value*
    ///       (dynamic tail length + statically sized prefix) must fit in `isize`.
    ///     - an (unstable) [extern type], then this function is always safe to
    ///       call, but may panic or otherwise return the wrong value, as the
    ///       extern type's layout is not known. This is the same behavior as
    ///       [`Layout::for_value`] on a reference to an extern type tail.
    ///     - otherwise, it is conservatively not allowed to call this function.
    ///
    /// [slice]: ../../std/primitive.slice.html
    /// [trait object]: ../../book/ch17-02-trait-objects.html
    /// [extern type]: ../../unstable-book/language-features/extern-types.html
    #[unstable(feature = "layout_for_ptr", issue = "69835")]
    pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
        // SAFETY: we pass along the prerequisites of these functions to the caller
        let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
        debug_assert!(Layout::from_size_align(size, align).is_ok());
        // SAFETY: see rationale in `new` for why this is using the unsafe variant
        unsafe { Layout::from_size_align_unchecked(size, align) }
    }

    /// Creates a `NonNull` that is dangling, but well-aligned for this Layout.
    ///
    /// Note that the pointer value may potentially represent a valid pointer,
    /// which means this must not be used as a "not yet initialized"
    /// sentinel value. Types that lazily allocate must track initialization by
    /// some other means.
    #[unstable(feature = "alloc_layout_extra", issue = "55724")]
    #[rustc_const_unstable(feature = "alloc_layout_extra", issue = "55724")]
    #[inline]
    pub const fn dangling(&self) -> NonNull<u8> {
        // SAFETY: align is guaranteed to be non-zero
        unsafe { NonNull::new_unchecked(self.align() as *mut u8) }
    }
+
+ /// Creates a layout describing the record that can hold a value
+ /// of the same layout as `self`, but that also is aligned to
+ /// alignment `align` (measured in bytes).
+ ///
+ /// If `self` already meets the prescribed alignment, then returns
+ /// `self`.
+ ///
+ /// Note that this method does not add any padding to the overall
+ /// size, regardless of whether the returned layout has a different
+ /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+ /// will *still* have size 16.
+ ///
+ /// Returns an error if the combination of `self.size()` and the given
+ /// `align` violates the conditions listed in [`Layout::from_size_align`].
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn align_to(&self, align: usize) -> Result<Self, LayoutErr> {
+ Layout::from_size_align(self.size(), cmp::max(self.align(), align))
+ }
+
+ /// Returns the amount of padding we must insert after `self`
+ /// to ensure that the following address will satisfy `align`
+ /// (measured in bytes).
+ ///
+ /// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
+ /// returns 3, because that is the minimum number of bytes of
+ /// padding required to get a 4-aligned address (assuming that the
+ /// corresponding memory block starts at a 4-aligned address).
+ ///
+ /// The return value of this function has no meaning if `align` is
+ /// not a power-of-two.
+ ///
+ /// Note that the utility of the returned value requires `align`
+ /// to be less than or equal to the alignment of the starting
+ /// address for the whole allocated block of memory. One way to
+ /// satisfy this constraint is to ensure `align <= self.align()`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
+ #[inline]
+ pub const fn padding_needed_for(&self, align: usize) -> usize {
+ let len = self.size();
+
+ // Rounded up value is:
+ // len_rounded_up = (len + align - 1) & !(align - 1);
+ // and then we return the padding difference: `len_rounded_up - len`.
+ //
+ // We use modular arithmetic throughout:
+ //
+ // 1. align is guaranteed to be > 0, so align - 1 is always
+ // valid.
+ //
+ // 2. `len + align - 1` can overflow by at most `align - 1`,
+ // so the &-mask with `!(align - 1)` will ensure that in the
+ // case of overflow, `len_rounded_up` will itself be 0.
+ // Thus the returned padding, when added to `len`, yields 0,
+ // which trivially satisfies the alignment `align`.
+ //
+ // (Of course, attempts to allocate blocks of memory whose
+ // size and padding overflow in the above manner should cause
+ // the allocator to yield an error anyway.)
+
+ let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+ len_rounded_up.wrapping_sub(len)
+ }
+
+ /// Creates a layout by rounding the size of this layout up to a multiple
+ /// of the layout's alignment.
+ ///
+ /// This is equivalent to adding the result of `padding_needed_for`
+ /// to the layout's current size.
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn pad_to_align(&self) -> Layout {
+ let pad = self.padding_needed_for(self.align());
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > `usize::MAX`)
+ let new_size = self.size() + pad;
+
+ Layout::from_size_align(new_size, self.align()).unwrap()
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with a suitable amount of padding between each to
+ /// ensure that each instance is given its requested size and
+ /// alignment. On success, returns `(k, offs)` where `k` is the
+ /// layout of the array and `offs` is the distance between the start
+ /// of each element in the array.
+ ///
+ /// On arithmetic overflow, returns `LayoutErr`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> {
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > `usize::MAX`)
+ let padded_size = self.size() + self.padding_needed_for(self.align());
+ let alloc_size = padded_size.checked_mul(n).ok_or(LayoutErr { private: () })?;
+
+ // SAFETY: self.align is already known to be valid and alloc_size has been
+ // padded already.
+ unsafe { Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) }
+ }
+
    /// Creates a layout describing the record for `self` followed by
    /// `next`, including any necessary padding to ensure that `next`
    /// will be properly aligned, but *no trailing padding*.
    ///
    /// In order to match C representation layout `repr(C)`, you should
    /// call `pad_to_align` after extending the layout with all fields.
    /// (There is no way to match the default Rust representation
    /// layout `repr(Rust)`, as it is unspecified.)
    ///
    /// Note that the alignment of the resulting layout will be the maximum of
    /// those of `self` and `next`, in order to ensure alignment of both parts.
    ///
    /// Returns `Ok((k, offset))`, where `k` is layout of the concatenated
    /// record and `offset` is the relative location, in bytes, of the
    /// start of the `next` embedded within the concatenated record
    /// (assuming that the record itself starts at offset 0).
    ///
    /// On arithmetic overflow, returns `LayoutErr`.
    ///
    /// # Examples
    ///
    /// To calculate the layout of a `#[repr(C)]` structure and the offsets of
    /// the fields from its fields' layouts:
    ///
    /// ```rust
    /// # use std::alloc::{Layout, LayoutErr};
    /// pub fn repr_c(fields: &[Layout]) -> Result<(Layout, Vec<usize>), LayoutErr> {
    ///     let mut offsets = Vec::new();
    ///     let mut layout = Layout::from_size_align(0, 1)?;
    ///     for &field in fields {
    ///         let (new_layout, offset) = layout.extend(field)?;
    ///         layout = new_layout;
    ///         offsets.push(offset);
    ///     }
    ///     // Remember to finalize with `pad_to_align`!
    ///     Ok((layout.pad_to_align(), offsets))
    /// }
    /// # // test that it works
    /// # #[repr(C)] struct S { a: u64, b: u32, c: u16, d: u32 }
    /// # let s = Layout::new::<S>();
    /// # let u16 = Layout::new::<u16>();
    /// # let u32 = Layout::new::<u32>();
    /// # let u64 = Layout::new::<u64>();
    /// # assert_eq!(repr_c(&[u64, u32, u16, u32]), Ok((s, vec![0, 8, 12, 16])));
    /// ```
    #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
    #[inline]
    pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> {
        // The combined record must satisfy the stricter of the two alignments.
        let new_align = cmp::max(self.align(), next.align());
        // Padding inserted after `self` so that `next` starts properly aligned
        // (assuming the record itself starts at an aligned address).
        let pad = self.padding_needed_for(next.align());

        let offset = self.size().checked_add(pad).ok_or(LayoutErr { private: () })?;
        let new_size = offset.checked_add(next.size()).ok_or(LayoutErr { private: () })?;

        let layout = Layout::from_size_align(new_size, new_align)?;
        Ok((layout, offset))
    }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with no padding between each instance.
+ ///
+ /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+ /// that the repeated instances of `self` will be properly
+ /// aligned, even if a given instance of `self` is properly
+ /// aligned. In other words, if the layout returned by
+ /// `repeat_packed` is used to allocate an array, it is not
+ /// guaranteed that all elements in the array will be properly
+ /// aligned.
+ ///
+ /// On arithmetic overflow, returns `LayoutErr`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutErr> {
+ let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?;
+ Layout::from_size_align(size, self.align())
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next` with no additional padding between the two. Since no
+ /// padding is inserted, the alignment of `next` is irrelevant,
+ /// and is not incorporated *at all* into the resulting layout.
+ ///
+ /// On arithmetic overflow, returns `LayoutErr`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutErr> {
+ let new_size = self.size().checked_add(next.size()).ok_or(LayoutErr { private: () })?;
+ Layout::from_size_align(new_size, self.align())
+ }
+
+ /// Creates a layout describing the record for a `[T; n]`.
+ ///
+ /// On arithmetic overflow, returns `LayoutErr`.
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn array<T>(n: usize) -> Result<Self, LayoutErr> {
+ let (layout, offset) = Layout::new::<T>().repeat(n)?;
+ debug_assert_eq!(offset, mem::size_of::<T>());
+ Ok(layout.pad_to_align())
+ }
+}
+
/// The parameters given to `Layout::from_size_align`
/// or some other `Layout` constructor
/// do not satisfy its documented constraints.
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct LayoutErr {
    // Zero-sized private field: prevents construction outside this module and
    // leaves room to add error details later without breaking compatibility.
    private: (),
}
+
+// (we need this for downstream impl of trait Error)
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+impl fmt::Display for LayoutErr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("invalid parameters to Layout::from_size_align")
+ }
+}
--- /dev/null
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+mod global;
+mod layout;
+
+#[stable(feature = "global_alloc", since = "1.28.0")]
+pub use self::global::GlobalAlloc;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+pub use self::layout::{Layout, LayoutErr};
+
+use crate::fmt;
+use crate::ptr::{self, NonNull};
+
/// The `AllocError` error indicates an allocation failure
/// that may be due to resource exhaustion or to
/// something wrong when combining the given input arguments with this
/// allocator.
// Zero-sized unit struct: the error carries no further information.
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AllocError;
+
+// (we need this for downstream impl of trait Error)
+#[unstable(feature = "allocator_api", issue = "32838")]
+impl fmt::Display for AllocError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("memory allocation failed")
+ }
+}
+
+/// An implementation of `AllocRef` can allocate, grow, shrink, and deallocate arbitrary blocks of
+/// data described via [`Layout`][].
+///
+/// `AllocRef` is designed to be implemented on ZSTs, references, or smart pointers because having
+/// an allocator like `MyAlloc([u8; N])` cannot be moved, without updating the pointers to the
+/// allocated memory.
+///
+/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `AllocRef`. If an underlying
+/// allocator does not support this (like jemalloc) or return a null pointer (such as
+/// `libc::malloc`), this must be caught by the implementation.
+///
+/// ### Currently allocated memory
+///
+/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
+/// means that:
+///
+/// * the starting address for that memory block was previously returned by [`alloc`], [`grow`], or
+/// [`shrink`], and
+///
+/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
+/// directly by being passed to [`dealloc`] or were changed by being passed to [`grow`] or
+/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
+/// remains valid.
+///
+/// [`alloc`]: AllocRef::alloc
+/// [`grow`]: AllocRef::grow
+/// [`shrink`]: AllocRef::shrink
+/// [`dealloc`]: AllocRef::dealloc
+///
+/// ### Memory fitting
+///
/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
/// following conditions must hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+/// - `min` is the size of the layout most recently used to allocate the block, and
+/// - `max` is the latest actual size returned from [`alloc`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
+/// until the instance and all of its clones are dropped,
+///
+/// * cloning or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+/// method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe trait AllocRef {
    /// Attempts to allocate a block of memory.
    ///
    /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
    ///
    /// The returned block may have a larger size than specified by `layout.size()`, and may or may
    /// not have its contents initialized.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
    /// allocator's size or alignment constraints.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    fn alloc(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;

    /// Behaves like `alloc`, but also ensures that the returned memory is zero-initialized.
    ///
    /// # Errors
    ///
    /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet the
    /// allocator's size or alignment constraints.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    fn alloc_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let ptr = self.alloc(layout)?;
        // SAFETY: `alloc` returns a valid memory block
        unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) }
        Ok(ptr)
    }

    /// Deallocates the memory referenced by `ptr`.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
    /// * `layout` must [*fit*] that block of memory.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout);

    /// Attempts to extend the memory block.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
    /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
    ///
    /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
    /// transferred to this allocator. The memory may or may not have been freed, and should be
    /// considered unusable unless it was transferred back to the caller again via the return value
    /// of this method.
    ///
    /// If this method returns `Err`, then ownership of the memory block has not been transferred to
    /// this allocator, and the contents of the memory block are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if growing otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        // Default implementation: allocate fresh, copy, then free the old block.
        let new_ptr = self.alloc(new_layout)?;

        // SAFETY: because `new_layout.size()` must be greater than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
            self.dealloc(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Behaves like `grow`, but also ensures that the new contents are set to zero before being
    /// returned.
    ///
    /// The memory block will contain the following contents after a successful call to
    /// `grow_zeroed`:
    ///   * Bytes `0..old_layout.size()` are preserved from the original allocation.
    ///   * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
    ///     the allocator implementation. `old_size` refers to the size of the memory block prior
    ///     to the `grow_zeroed` call, which may be larger than the size that was originally
    ///     requested when it was allocated.
    ///   * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
    ///     block returned by the `grow_zeroed` call.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if growing otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        // Allocating zeroed up front means only the copied prefix is nonzero.
        let new_ptr = self.alloc_zeroed(new_layout)?;

        // SAFETY: because `new_layout.size()` must be greater than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
            self.dealloc(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Attempts to shrink the memory block.
    ///
    /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
    /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
    /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
    ///
    /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
    /// transferred to this allocator. The memory may or may not have been freed, and should be
    /// considered unusable unless it was transferred back to the caller again via the return value
    /// of this method.
    ///
    /// If this method returns `Err`, then ownership of the memory block has not been transferred to
    /// this allocator, and the contents of the memory block are unaltered.
    ///
    /// # Safety
    ///
    /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
    /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
    /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
    ///
    /// [*currently allocated*]: #currently-allocated-memory
    /// [*fit*]: #memory-fitting
    ///
    /// # Errors
    ///
    /// Returns `Err` if the new layout does not meet the allocator's size and alignment
    /// constraints, or if shrinking otherwise fails.
    ///
    /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
    /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
    /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
    ///
    /// Clients wishing to abort computation in response to an allocation error are encouraged to
    /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
    ///
    /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        // Default implementation: allocate fresh, copy the prefix, free the old block.
        let new_ptr = self.alloc(new_layout)?;

        // SAFETY: because `new_layout.size()` must be lower than or equal to
        // `old_layout.size()`, both the old and new memory allocation are valid for reads and
        // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
        // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
        // safe. The safety contract for `dealloc` must be upheld by the caller.
        unsafe {
            ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_layout.size());
            self.dealloc(ptr, old_layout);
        }

        Ok(new_ptr)
    }

    /// Creates a "by reference" adaptor for this instance of `AllocRef`.
    ///
    /// The returned adaptor also implements `AllocRef` and will simply borrow this.
    #[inline(always)]
    fn by_ref(&self) -> &Self {
        self
    }
}
+
// Delegating impl: a shared reference to an allocator is itself an allocator.
// Every method forwards unchanged to the underlying `A`, so memory allocated
// through `&A` is interchangeable with memory allocated through `A` directly.
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl<A> AllocRef for &A
where
    A: AllocRef + ?Sized,
{
    #[inline]
    fn alloc(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).alloc(layout)
    }

    #[inline]
    fn alloc_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        (**self).alloc_zeroed(layout)
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: NonNull<u8>, layout: Layout) {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).dealloc(ptr, layout) }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: the safety contract must be upheld by the caller
        unsafe { (**self).shrink(ptr, old_layout, new_layout) }
    }
}
--- /dev/null
+//! This module implements the `Any` trait, which enables dynamic typing
+//! of any `'static` type through runtime reflection.
+//!
+//! `Any` itself can be used to get a `TypeId`, and has more features when used
+//! as a trait object. As `&dyn Any` (a borrowed trait object), it has the `is`
+//! and `downcast_ref` methods, to test if the contained value is of a given type,
+//! and to get a reference to the inner value as a type. As `&mut dyn Any`, there
+//! is also the `downcast_mut` method, for getting a mutable reference to the
+//! inner value. `Box<dyn Any>` adds the `downcast` method, which attempts to
+//! convert to a `Box<T>`. See the [`Box`] documentation for the full details.
+//!
+//! Note that `&dyn Any` is limited to testing whether a value is of a specified
+//! concrete type, and cannot be used to test whether a type implements a trait.
+//!
+//! [`Box`]: ../../std/boxed/struct.Box.html
+//!
+//! # Examples
+//!
+//! Consider a situation where we want to log out a value passed to a function.
+//! We know the value we're working on implements Debug, but we don't know its
+//! concrete type. We want to give special treatment to certain types: in this
+//! case printing out the length of String values prior to their value.
+//! We don't know the concrete type of our value at compile time, so we need to
+//! use runtime reflection instead.
+//!
+//! ```rust
+//! use std::fmt::Debug;
+//! use std::any::Any;
+//!
+//! // Logger function for any type that implements Debug.
+//! fn log<T: Any + Debug>(value: &T) {
+//! let value_any = value as &dyn Any;
+//!
+//! // Try to convert our value to a `String`. If successful, we want to
//! // output the `String`'s length as well as its value. If not, it's a
+//! // different type: just print it out unadorned.
+//! match value_any.downcast_ref::<String>() {
+//! Some(as_string) => {
+//! println!("String ({}): {}", as_string.len(), as_string);
+//! }
+//! None => {
+//! println!("{:?}", value);
+//! }
+//! }
+//! }
+//!
+//! // This function wants to log its parameter out prior to doing work with it.
+//! fn do_work<T: Any + Debug>(value: &T) {
+//! log(value);
+//! // ...do some other work
+//! }
+//!
+//! fn main() {
+//! let my_string = "Hello World".to_string();
+//! do_work(&my_string);
+//!
+//! let my_i8: i8 = 100;
+//! do_work(&my_i8);
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::intrinsics;
+
+///////////////////////////////////////////////////////////////////////////////
+// Any trait
+///////////////////////////////////////////////////////////////////////////////
+
/// A trait to emulate dynamic typing.
///
/// Most types implement `Any`. However, any type which contains a non-`'static` reference does not.
/// See the [module-level documentation][mod] for more details.
///
/// [mod]: crate::any
// This trait is not unsafe, though we rely on the specifics of its sole impl's
// `type_id` function in unsafe code (e.g., `downcast`). Normally, that would be
// a problem, but because the only impl of `Any` is a blanket implementation, no
// other code can implement `Any`.
//
// We could plausibly make this trait unsafe -- it would not cause breakage,
// since we control all the implementations -- but we choose not to as that's
// both not really necessary and may confuse users about the distinction of
// unsafe traits and unsafe methods (i.e., `type_id` would still be safe to call,
// but we would likely want to indicate as such in documentation).
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Any: 'static {
    /// Gets the `TypeId` of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::{Any, TypeId};
    ///
    /// fn is_string(s: &dyn Any) -> bool {
    ///     TypeId::of::<String>() == s.type_id()
    /// }
    ///
    /// assert_eq!(is_string(&0), false);
    /// assert_eq!(is_string(&"cookie monster".to_string()), true);
    /// ```
    #[stable(feature = "get_type_id", since = "1.34.0")]
    fn type_id(&self) -> TypeId;
}
+
// Blanket impl: every `'static` (possibly unsized) type implements `Any`.
// Being the sole impl, it also guarantees no other code can implement `Any`
// (see the comment on the trait definition).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: 'static + ?Sized> Any for T {
    fn type_id(&self) -> TypeId {
        TypeId::of::<T>()
    }
}
+
+///////////////////////////////////////////////////////////////////////////////
+// Extension methods for Any trait objects.
+///////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for dyn Any {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Any")
+ }
+}
+
+// Ensure that the result of e.g., joining a thread can be printed and
+// hence used with `unwrap`. May eventually no longer be needed if
+// dispatch works with upcasting.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for dyn Any + Send {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Any")
+ }
+}
+
+#[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+impl fmt::Debug for dyn Any + Send + Sync {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Any")
+ }
+}
+
+impl dyn Any {
+ /// Returns `true` if the boxed type is the same as `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &dyn Any) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ // Get `TypeId` of the type this function is instantiated with.
+ let t = TypeId::of::<T>();
+
+ // Get `TypeId` of the type in the trait object (`self`).
+ let concrete = self.type_id();
+
+ // Compare both `TypeId`s on equality.
+ t == concrete
+ }
+
+ /// Returns some reference to the boxed value if it is of type `T`, or
+ /// `None` if it isn't.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &dyn Any) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ if self.is::<T>() {
+ // SAFETY: just checked whether we are pointing to the correct type, and we can rely on
+ // that check for memory safety because we have implemented Any for all types; no other
+ // impls can exist as they would conflict with our impl.
+ unsafe { Some(&*(self as *const dyn Any as *const T)) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns some mutable reference to the boxed value if it is of type `T`, or
+ /// `None` if it isn't.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut dyn Any) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ if self.is::<T>() {
+ // SAFETY: just checked whether we are pointing to the correct type, and we can rely on
+ // that check for memory safety because we have implemented Any for all types; no other
+ // impls can exist as they would conflict with our impl.
+ unsafe { Some(&mut *(self as *mut dyn Any as *mut T)) }
+ } else {
+ None
+ }
+ }
+}
+
+impl dyn Any + Send {
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &(dyn Any + Send)) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ Any::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &(dyn Any + Send)) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ Any::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut (dyn Any + Send)) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ Any::downcast_mut::<T>(self)
+ }
+}
+
+impl dyn Any + Send + Sync {
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn is_string(s: &(dyn Any + Send + Sync)) {
+ /// if s.is::<String>() {
+ /// println!("It's a string!");
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// is_string(&0);
+ /// is_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn is<T: Any>(&self) -> bool {
+ Any::is::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(s: &(dyn Any + Send + Sync)) {
+ /// if let Some(string) = s.downcast_ref::<String>() {
+ /// println!("It's a string({}): '{}'", string.len(), string);
+ /// } else {
+ /// println!("Not a string...");
+ /// }
+ /// }
+ ///
+ /// print_if_string(&0);
+ /// print_if_string(&"cookie monster".to_string());
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ Any::downcast_ref::<T>(self)
+ }
+
+ /// Forwards to the method defined on the type `Any`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn modify_if_u32(s: &mut (dyn Any + Send + Sync)) {
+ /// if let Some(num) = s.downcast_mut::<u32>() {
+ /// *num = 42;
+ /// }
+ /// }
+ ///
+ /// let mut x = 10u32;
+ /// let mut s = "starlord".to_string();
+ ///
+ /// modify_if_u32(&mut x);
+ /// modify_if_u32(&mut s);
+ ///
+ /// assert_eq!(x, 42);
+ /// assert_eq!(&s, "starlord");
+ /// ```
+ #[stable(feature = "any_send_sync_methods", since = "1.28.0")]
+ #[inline]
+ pub fn downcast_mut<T: Any>(&mut self) -> Option<&mut T> {
+ Any::downcast_mut::<T>(self)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
// TypeId and its methods
+///////////////////////////////////////////////////////////////////////////////
+
/// A `TypeId` represents a globally unique identifier for a type.
///
/// Each `TypeId` is an opaque object which does not allow inspection of what's
/// inside but does allow basic operations such as cloning, comparison,
/// printing, and showing.
///
/// A `TypeId` is currently only available for types which ascribe to `'static`,
/// but this limitation may be removed in the future.
///
/// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
/// noting that the hashes and ordering will vary between Rust releases. Beware
/// of relying on them inside of your code!
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct TypeId {
    // Compiler-assigned identifier for the type; the field is private so the
    // representation can change between releases.
    t: u64,
}
+
impl TypeId {
    /// Returns the `TypeId` of the type this generic function has been
    /// instantiated with.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::any::{Any, TypeId};
    ///
    /// fn is_string<T: ?Sized + Any>(_s: &T) -> bool {
    ///     TypeId::of::<String>() == TypeId::of::<T>()
    /// }
    ///
    /// assert_eq!(is_string(&0), false);
    /// assert_eq!(is_string(&"cookie monster".to_string()), true);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
    pub const fn of<T: ?Sized + 'static>() -> TypeId {
        // The identifier itself is produced by the compiler via an intrinsic.
        TypeId { t: intrinsics::type_id::<T>() }
    }
}
+
/// Returns the name of a type as a string slice.
///
/// # Note
///
/// This is intended for diagnostic use. The exact contents and format of the
/// string returned are not specified, other than being a best-effort
/// description of the type. For example, amongst the strings
/// that `type_name::<Option<String>>()` might return are `"Option<String>"` and
/// `"std::option::Option<std::string::String>"`.
///
/// The returned string must not be considered to be a unique identifier of a
/// type as multiple types may map to the same type name. Similarly, there is no
/// guarantee that all parts of a type will appear in the returned string: for
/// example, lifetime specifiers are currently not included. In addition, the
/// output may change between versions of the compiler.
///
/// The current implementation uses the same infrastructure as compiler
/// diagnostics and debuginfo, but this is not guaranteed.
///
/// # Examples
///
/// ```rust
/// assert_eq!(
///     std::any::type_name::<Option<String>>(),
///     "core::option::Option<alloc::string::String>",
/// );
/// ```
#[stable(feature = "type_name", since = "1.38.0")]
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub const fn type_name<T: ?Sized>() -> &'static str {
    // The string is generated by the compiler via an intrinsic.
    intrinsics::type_name::<T>()
}
+
/// Returns the name of the type of the pointed-to value as a string slice.
/// This is the same as `type_name::<T>()`, but can be used where the type of a
/// variable is not easily available.
///
/// # Note
///
/// This is intended for diagnostic use. The exact contents and format of the
/// string are not specified, other than being a best-effort description of the
/// type. For example, `type_name_of_val::<Option<String>>(None)` could return
/// `"Option<String>"` or `"std::option::Option<std::string::String>"`, but not
/// `"foobar"`. In addition, the output may change between versions of the
/// compiler.
///
/// This function does not resolve trait objects,
/// meaning that `type_name_of_val(&7u32 as &dyn Debug)`
/// may return `"dyn Debug"`, but not `"u32"`.
///
/// The type name should not be considered a unique identifier of a type;
/// multiple types may share the same type name.
///
/// The current implementation uses the same infrastructure as compiler
/// diagnostics and debuginfo, but this is not guaranteed.
///
/// # Examples
///
/// Prints the default integer and float types.
///
/// ```rust
/// #![feature(type_name_of_val)]
/// use std::any::type_name_of_val;
///
/// let x = 1;
/// println!("{}", type_name_of_val(&x));
/// let y = 1.0;
/// println!("{}", type_name_of_val(&y));
/// ```
#[unstable(feature = "type_name_of_val", issue = "66359")]
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
    // Only the (statically known) type of the argument is used; the value
    // itself is ignored.
    type_name::<T>()
}
--- /dev/null
+//! Defines the `IntoIter` owned iterator for arrays.
+
+use crate::{
+ fmt,
+ iter::{ExactSizeIterator, FusedIterator, TrustedLen},
+ mem::{self, MaybeUninit},
+ ops::Range,
+ ptr,
+};
+
/// A by-value [array] iterator.
///
/// [array]: ../../std/primitive.array.html
#[unstable(feature = "array_value_iter", issue = "65798")]
pub struct IntoIter<T, const N: usize> {
    /// This is the array we are iterating over.
    ///
    /// Elements with index `i` where `alive.start <= i < alive.end` have not
    /// been yielded yet and are valid array entries. Elements with indices `i
    /// < alive.start` or `i >= alive.end` have been yielded already and must
    /// not be accessed anymore! Those dead elements might even be in a
    /// completely uninitialized state!
    ///
    /// So the invariants are:
    /// - `data[alive]` is alive (i.e. contains valid elements)
    /// - `data[..alive.start]` and `data[alive.end..]` are dead (i.e. the
    ///   elements were already read and must not be touched anymore!)
    //
    // `MaybeUninit` is what permits the dead slots to hold moved-out values
    // without any drop or validity obligations.
    data: [MaybeUninit<T>; N],

    /// The elements in `data` that have not been yielded yet.
    ///
    /// Invariants:
    /// - `alive.start <= alive.end`
    /// - `alive.end <= N`
    alive: Range<usize>,
}
+
impl<T, const N: usize> IntoIter<T, N> {
    /// Creates a new iterator over the given `array`.
    ///
    /// *Note*: this method might never get stabilized and/or removed in the
    /// future as there will likely be another, preferred way of obtaining this
    /// iterator (either via `IntoIterator` for arrays or via another way).
    #[unstable(feature = "array_value_iter", issue = "65798")]
    pub fn new(array: [T; N]) -> Self {
        // SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
        // promise:
        //
        // > `MaybeUninit<T>` is guaranteed to have the same size and alignment
        // > as `T`.
        //
        // The docs even show a transmute from an array of `MaybeUninit<T>` to
        // an array of `T`.
        //
        // With that, this initialization satisfies the invariants.

        // FIXME(LukasKalbertodt): actually use `mem::transmute` here, once it
        // works with const generics:
        //     `mem::transmute::<[T; N], [MaybeUninit<T>; N]>(array)`
        //
        // Until then, we can use `mem::transmute_copy` to create a bitwise copy
        // as a different type, then forget `array` so that it is not dropped.
        unsafe {
            // `alive: 0..N` marks every slot as not-yet-yielded, which is
            // correct because the whole source array is initialized.
            let iter = Self { data: mem::transmute_copy(&array), alive: 0..N };
            // The elements now live (bitwise) in `iter.data`; forgetting the
            // original prevents a double drop.
            mem::forget(array);
            iter
        }
    }

    /// Returns an immutable slice of all elements that have not been yielded
    /// yet.
    fn as_slice(&self) -> &[T] {
        // SAFETY: We know that all elements within `alive` are properly initialized.
        // `alive` is in bounds of `data` by the invariant `alive.end <= N`.
        unsafe {
            let slice = self.data.get_unchecked(self.alive.clone());
            MaybeUninit::slice_assume_init_ref(slice)
        }
    }

    /// Returns a mutable slice of all elements that have not been yielded yet.
    fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: We know that all elements within `alive` are properly initialized.
        // `alive` is in bounds of `data` by the invariant `alive.end <= N`.
        unsafe {
            let slice = self.data.get_unchecked_mut(self.alive.clone());
            MaybeUninit::slice_assume_init_mut(slice)
        }
    }
}
+
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> Iterator for IntoIter<T, N> {
    type Item = T;
    fn next(&mut self) -> Option<Self::Item> {
        // Get the next index from the front.
        //
        // Increasing `alive.start` by 1 maintains the invariant regarding
        // `alive`. However, due to this change, for a short time, the alive
        // zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
        self.alive.next().map(|idx| {
            // Read the element from the array.
            // SAFETY: `idx` is an index into the former "alive" region of the
            // array. Reading this element means that `data[idx]` is regarded as
            // dead now (i.e. do not touch). As `idx` was the start of the
            // alive-zone, the alive zone is now `data[alive]` again, restoring
            // all invariants.
            unsafe { self.data.get_unchecked(idx).assume_init_read() }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // The exact number of remaining elements is always known.
        let len = self.len();
        (len, Some(len))
    }

    fn count(self) -> usize {
        // Consuming `self` drops the remaining elements (via `Drop`) without
        // iterating them one by one.
        self.len()
    }

    fn last(mut self) -> Option<Self::Item> {
        // Take the rearmost alive element; the ones before it are dropped by
        // the `Drop` impl when `self` goes out of scope.
        self.next_back()
    }
}
+
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> DoubleEndedIterator for IntoIter<T, N> {
    fn next_back(&mut self) -> Option<Self::Item> {
        // Get the next index from the back.
        //
        // Decreasing `alive.end` by 1 maintains the invariant regarding
        // `alive`. However, due to this change, for a short time, the alive
        // zone is not `data[alive]` anymore, but `data[alive.start..=idx]`.
        self.alive.next_back().map(|idx| {
            // Read the element from the array.
            // SAFETY: `idx` is an index into the former "alive" region of the
            // array. Reading this element means that `data[idx]` is regarded as
            // dead now (i.e. do not touch). As `idx` was the end of the
            // alive-zone, the alive zone is now `data[alive]` again, restoring
            // all invariants.
            unsafe { self.data.get_unchecked(idx).assume_init_read() }
        })
    }
}
+
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> Drop for IntoIter<T, N> {
    fn drop(&mut self) {
        // SAFETY: This is safe: `as_mut_slice` returns exactly the sub-slice
        // of elements that have not been moved out yet and that remain
        // to be dropped. Already-yielded slots are outside that slice and are
        // never touched.
        unsafe { ptr::drop_in_place(self.as_mut_slice()) }
    }
}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T, const N: usize> ExactSizeIterator for IntoIter<T, N> {
+ fn len(&self) -> usize {
+ // Will never underflow due to the invariant `alive.start <=
+ // alive.end`.
+ self.alive.end - self.alive.start
+ }
+ fn is_empty(&self) -> bool {
+ self.alive.is_empty()
+ }
+}
+
// Once `alive` is exhausted, `next`/`next_back` keep returning `None`, so the
// iterator is fused by construction.
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T, const N: usize> FusedIterator for IntoIter<T, N> {}
+
// The iterator indeed reports the correct length. The number of "alive"
// elements (that will still be yielded) is the length of the range `alive`.
// This range is decremented in length in either `next` or `next_back`. It is
// always decremented by 1 in those methods, but only if `Some(_)` is returned.
// That is exactly the contract `TrustedLen` requires of `size_hint`.
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
unsafe impl<T, const N: usize> TrustedLen for IntoIter<T, N> {}
+
#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
impl<T: Clone, const N: usize> Clone for IntoIter<T, N> {
    fn clone(&self) -> Self {
        // Note, we don't really need to match the exact same alive range, so
        // we can just clone into offset 0 regardless of where `self` is.
        let mut new = Self { data: MaybeUninit::uninit_array(), alive: 0..0 };

        // Clone all alive elements.
        for (src, dst) in self.as_slice().iter().zip(&mut new.data) {
            // Write a clone into the new array, then update its alive range.
            // If cloning panics, we'll correctly drop the previous items.
            // (`alive.end` is only bumped after the write succeeds, so `new`'s
            // `Drop` impl never touches an uninitialized slot.)
            dst.write(src.clone());
            new.alive.end += 1;
        }

        new
    }
}
+
+#[stable(feature = "array_value_iter_impls", since = "1.40.0")]
+impl<T: fmt::Debug, const N: usize> fmt::Debug for IntoIter<T, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Only print the elements that were not yielded yet: we cannot
+ // access the yielded elements anymore.
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
--- /dev/null
+//! Implementations of things like `Eq` for fixed-length arrays
+//! up to a certain length. Eventually, we should be able to generalize
+//! to all lengths.
+//!
+//! *[See also the array primitive type](../../std/primitive.array.html).*
+
+#![stable(feature = "core_array", since = "1.36.0")]
+
+use crate::borrow::{Borrow, BorrowMut};
+use crate::cmp::Ordering;
+use crate::convert::{Infallible, TryFrom};
+use crate::fmt;
+use crate::hash::{self, Hash};
+use crate::marker::Unsize;
+use crate::slice::{Iter, IterMut};
+
+mod iter;
+
+#[unstable(feature = "array_value_iter", issue = "65798")]
+pub use iter::IntoIter;
+
/// Converts a reference to `T` into a reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_ref<T>(s: &T) -> &[T; 1] {
    // SAFETY: Converting `&T` to `&[T; 1]` is sound: `[T; 1]` has the same
    // size and alignment as `T`, and the returned reference keeps borrowing
    // from `s`, so the usual aliasing and lifetime rules still apply.
    unsafe { &*(s as *const T).cast::<[T; 1]>() }
}
+
/// Converts a mutable reference to `T` into a mutable reference to an array of length 1 (without copying).
#[unstable(feature = "array_from_ref", issue = "77101")]
pub fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
    // SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound: `[T; 1]` has the
    // same size and alignment as `T`, and the returned reference keeps
    // exclusively borrowing from `s`.
    unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}
+
/// Utility trait implemented only on arrays of fixed size
///
/// This trait can be used to implement other traits on fixed-size arrays
/// without causing much metadata bloat.
///
/// The trait is marked unsafe in order to restrict implementors to fixed-size
/// arrays. User of this trait can assume that implementors have the exact
/// layout in memory of a fixed size array (for example, for unsafe
/// initialization).
///
/// Note that the traits [`AsRef`] and [`AsMut`] provide similar methods for types that
/// may not be fixed-size arrays. Implementors should prefer those traits
/// instead.
#[unstable(feature = "fixed_size_array", issue = "27778")]
pub unsafe trait FixedSizeArray<T> {
    /// Converts the array to immutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_slice(&self) -> &[T];
    /// Converts the array to mutable slice
    #[unstable(feature = "fixed_size_array", issue = "27778")]
    fn as_mut_slice(&mut self) -> &mut [T];
}
+
#[unstable(feature = "fixed_size_array", issue = "27778")]
// The `A: Unsize<[T]>` bound restricts implementors to true fixed-size arrays
// `[T; N]`, which upholds the layout guarantee the unsafe trait promises.
unsafe impl<T, A: Unsize<[T]>> FixedSizeArray<T> for A {
    #[inline]
    fn as_slice(&self) -> &[T] {
        // Unsizing coercion `&[T; N]` -> `&[T]`.
        self
    }
    #[inline]
    fn as_mut_slice(&mut self) -> &mut [T] {
        // Unsizing coercion `&mut [T; N]` -> `&mut [T]`.
        self
    }
}
+
/// The error type returned when a conversion from a slice to an array fails.
#[stable(feature = "try_from", since = "1.34.0")]
#[derive(Debug, Copy, Clone)]
// The private unit field keeps the struct non-constructible outside this
// module while carrying no data (the only failure mode is a length mismatch).
pub struct TryFromSliceError(());
+
#[stable(feature = "core_array", since = "1.36.0")]
impl fmt::Display for TryFromSliceError {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to `str`'s `Display` (which honors padding flags) rather
        // than a raw `write_str`.
        fmt::Display::fmt(self.__description(), f)
    }
}
+
impl TryFromSliceError {
    // Shared message text for the `Display` (and, in std, `Error`) impls.
    #[unstable(
        feature = "array_error_internals",
        reason = "available through Error trait and this method should not \
                  be exposed publicly",
        issue = "none"
    )]
    #[inline]
    #[doc(hidden)]
    pub fn __description(&self) -> &str {
        "could not convert slice to array"
    }
}
+
#[stable(feature = "try_from_slice_error", since = "1.36.0")]
impl From<Infallible> for TryFromSliceError {
    fn from(x: Infallible) -> TryFromSliceError {
        // `Infallible` has no values, so this conversion can never actually
        // run; the empty match proves that to the compiler.
        match x {}
    }
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, const N: usize> AsRef<[T]> for [T; N] {
+ #[inline]
+ fn as_ref(&self) -> &[T] {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, const N: usize> AsMut<[T]> for [T; N] {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [T] {
+ &mut self[..]
+ }
+}
+
+#[stable(feature = "array_borrow", since = "1.4.0")]
+impl<T, const N: usize> Borrow<[T]> for [T; N] {
+ fn borrow(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "array_borrow", since = "1.4.0")]
+impl<T, const N: usize> BorrowMut<[T]> for [T; N] {
+ fn borrow_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<T, const N: usize> TryFrom<&[T]> for [T; N]
+where
+ T: Copy,
+{
+ type Error = TryFromSliceError;
+
+ fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
+ <&Self>::try_from(slice).map(|r| *r)
+ }
+}
+
#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a [T]> for &'a [T; N] {
    type Error = TryFromSliceError;

    fn try_from(slice: &[T]) -> Result<&[T; N], TryFromSliceError> {
        if slice.len() == N {
            // A slice of exactly `N` elements has the same layout as `[T; N]`,
            // so the pointer cast below is layout-compatible.
            let ptr = slice.as_ptr() as *const [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&*ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}
+
#[stable(feature = "try_from", since = "1.34.0")]
impl<'a, T, const N: usize> TryFrom<&'a mut [T]> for &'a mut [T; N] {
    type Error = TryFromSliceError;

    fn try_from(slice: &mut [T]) -> Result<&mut [T; N], TryFromSliceError> {
        if slice.len() == N {
            // Same layout argument as in the shared-reference impl above, but
            // for the exclusive borrow.
            let ptr = slice.as_mut_ptr() as *mut [T; N];
            // SAFETY: ok because we just checked that the length fits
            unsafe { Ok(&mut *ptr) }
        } else {
            Err(TryFromSliceError(()))
        }
    }
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, const N: usize> Hash for [T; N] {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ Hash::hash(&self[..], state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, const N: usize> fmt::Debug for [T; N] {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&&self[..], f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, const N: usize> IntoIterator for &'a [T; N] {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, const N: usize> IntoIterator for &'a mut [T; N] {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[B; N]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &[B; N]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &[B; N]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &[B]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &[B]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B, const N: usize> PartialEq<[A; N]> for [B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'b, A, B, const N: usize> PartialEq<&'b [B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &&'b [B]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &&'b [B]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b [B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'b, A, B, const N: usize> PartialEq<&'b mut [B]> for [A; N]
+where
+ A: PartialEq<B>,
+{
+ #[inline]
+ fn eq(&self, other: &&'b mut [B]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &&'b mut [B]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'b, A, B, const N: usize> PartialEq<[A; N]> for &'b mut [B]
+where
+ B: PartialEq<A>,
+{
+ #[inline]
+ fn eq(&self, other: &[A; N]) -> bool {
+ self[..] == other[..]
+ }
+ #[inline]
+ fn ne(&self, other: &[A; N]) -> bool {
+ self[..] != other[..]
+ }
+}
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// __impl_slice_eq2! { [A; $N], &'b [B; $N] }
+// __impl_slice_eq2! { [A; $N], &'b mut [B; $N] }
+
// `PartialEq` for same-type arrays is element-wise, so reflexivity follows
// from `T: Eq`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, const N: usize> Eq for [T; N] {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, const N: usize> PartialOrd for [T; N] {
+ #[inline]
+ fn partial_cmp(&self, other: &[T; N]) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn lt(&self, other: &[T; N]) -> bool {
+ PartialOrd::lt(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn le(&self, other: &[T; N]) -> bool {
+ PartialOrd::le(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn ge(&self, other: &[T; N]) -> bool {
+ PartialOrd::ge(&&self[..], &&other[..])
+ }
+ #[inline]
+ fn gt(&self, other: &[T; N]) -> bool {
+ PartialOrd::gt(&&self[..], &&other[..])
+ }
+}
+
+/// Implements comparison of arrays [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, const N: usize> Ord for [T; N] {
+ #[inline]
+ fn cmp(&self, other: &[T; N]) -> Ordering {
+ Ord::cmp(&&self[..], &&other[..])
+ }
+}
+
+// The Default impls cannot be done with const generics because `[T; 0]` doesn't
+// require Default to be implemented, and having different impl blocks for
+// different numbers isn't supported yet.
+
macro_rules! array_impl_default {
    // Recursive case: emit a `Default` impl for length `$n` (one `$t` token
    // per element), then recurse for length `$n - 1` with one fewer token.
    {$n:expr, $t:ident $($ts:ident)*} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] where T: Default {
            fn default() -> [T; $n] {
                [$t::default(), $($ts::default()),*]
            }
        }
        array_impl_default!{($n - 1), $($ts)*}
    };
    // Base case: the empty array is `Default` for any `T` (no `T: Default`
    // bound needed since no element is constructed).
    {$n:expr,} => {
        #[stable(since = "1.4.0", feature = "array_default")]
        impl<T> Default for [T; $n] {
            fn default() -> [T; $n] { [] }
        }
    };
}

// Generate `Default` impls for all lengths 0 through 32; each `T` token
// stands for one array element.
array_impl_default! {32, T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T T}
+
#[lang = "array"]
impl<T, const N: usize> [T; N] {
    /// Returns an array of the same size as `self`, with function `f` applied to each element
    /// in order.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(array_map)]
    /// let x = [1, 2, 3];
    /// let y = x.map(|v| v + 1);
    /// assert_eq!(y, [2, 3, 4]);
    ///
    /// let x = [1, 2, 3];
    /// let mut temp = 0;
    /// let y = x.map(|v| { temp += 1; v * temp });
    /// assert_eq!(y, [1, 4, 9]);
    ///
    /// let x = ["Ferris", "Bueller's", "Day", "Off"];
    /// let y = x.map(|v| v.len());
    /// assert_eq!(y, [6, 9, 3, 3]);
    /// ```
    #[unstable(feature = "array_map", issue = "75243")]
    pub fn map<F, U>(self, mut f: F) -> [U; N]
    where
        F: FnMut(T) -> U,
    {
        use crate::mem::MaybeUninit;
        // Panic guard: if `f` panics midway, `Guard::drop` drops exactly the
        // `initialized` destination elements written so far, preventing leaks
        // without ever touching uninitialized slots.
        struct Guard<T, const N: usize> {
            // Pointer to the start of the destination buffer.
            dst: *mut T,
            // Number of elements written so far (prefix of `dst`).
            initialized: usize,
        }

        impl<T, const N: usize> Drop for Guard<T, N> {
            fn drop(&mut self) {
                debug_assert!(self.initialized <= N);

                let initialized_part =
                    crate::ptr::slice_from_raw_parts_mut(self.dst, self.initialized);
                // SAFETY: this raw slice will contain only initialized objects
                // that's why, it is allowed to drop it.
                unsafe {
                    crate::ptr::drop_in_place(initialized_part);
                }
            }
        }
        let mut dst = MaybeUninit::uninit_array::<N>();
        let mut guard: Guard<U, N> =
            Guard { dst: MaybeUninit::slice_as_mut_ptr(&mut dst), initialized: 0 };
        for (src, dst) in IntoIter::new(self).zip(&mut dst) {
            // Write the mapped value first; only then extend the guard's
            // initialized prefix, so a panic in `f` never over-drops.
            dst.write(f(src));
            guard.initialized += 1;
        }
        // FIXME: Convert to crate::mem::transmute once it works with generics.
        // unsafe { crate::mem::transmute::<[MaybeUninit<U>; N], [U; N]>(dst) }
        // All elements are initialized now; disarm the guard so the results
        // are not dropped.
        crate::mem::forget(guard);
        // SAFETY: At this point we've properly initialized the whole array
        // and we just need to cast it to the correct type.
        unsafe { crate::mem::transmute_copy::<_, [U; N]>(&dst) }
    }

    /// Returns a slice containing the entire array. Equivalent to `&s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_slice(&self) -> &[T] {
        self
    }

    /// Returns a mutable slice containing the entire array. Equivalent to
    /// `&mut s[..]`.
    #[unstable(feature = "array_methods", issue = "76118")]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self
    }
}
--- /dev/null
+//! Operations on ASCII strings and characters.
+//!
+//! Most string operations in Rust act on UTF-8 strings. However, at times it
+//! makes more sense to only consider the ASCII character set for a specific
+//! operation.
+//!
+//! The [`escape_default`] function provides an iterator over the bytes of an
+//! escaped version of the character given.
+
+#![stable(feature = "core_ascii", since = "1.26.0")]
+
+use crate::fmt;
+use crate::iter::FusedIterator;
+use crate::ops::Range;
+use crate::str::from_utf8_unchecked;
+
/// An iterator over the escaped version of a byte.
///
/// This `struct` is created by the [`escape_default`] function. See its
/// documentation for more.
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct EscapeDefault {
    // Indices into `data` that have not been yielded yet.
    range: Range<usize>,
    // Buffer holding the escape sequence; the longest form (`\xNN`) uses all
    // 4 bytes.
    data: [u8; 4],
}
+
+/// Returns an iterator that produces an escaped version of a `u8`.
+///
+/// The default is chosen with a bias toward producing literals that are
+/// legal in a variety of languages, including C++11 and similar C-family
+/// languages. The exact rules are:
+///
+/// * Tab is escaped as `\t`.
+/// * Carriage return is escaped as `\r`.
+/// * Line feed is escaped as `\n`.
+/// * Single quote is escaped as `\'`.
+/// * Double quote is escaped as `\"`.
+/// * Backslash is escaped as `\\`.
+/// * Any character in the 'printable ASCII' range `0x20` .. `0x7e`
+///   inclusive is not escaped.
+/// * Any other chars are given hex escapes of the form '\xNN'.
+/// * Unicode escapes are never generated by this function.
+///
+/// # Examples
+///
+/// ```
+/// use std::ascii;
+///
+/// let escaped = ascii::escape_default(b'0').next().unwrap();
+/// assert_eq!(b'0', escaped);
+///
+/// let mut escaped = ascii::escape_default(b'\t');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b't', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\r');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'r', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\n');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'n', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\'');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'\'', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'"');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'"', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\\');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'\\', escaped.next().unwrap());
+///
+/// let mut escaped = ascii::escape_default(b'\x9d');
+///
+/// assert_eq!(b'\\', escaped.next().unwrap());
+/// assert_eq!(b'x', escaped.next().unwrap());
+/// assert_eq!(b'9', escaped.next().unwrap());
+/// assert_eq!(b'd', escaped.next().unwrap());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn escape_default(c: u8) -> EscapeDefault {
+    /// Maps a nibble (`0..=15`) to its lowercase hexadecimal digit.
+    fn hexify(b: u8) -> u8 {
+        match b {
+            0..=9 => b'0' + b,
+            _ => b'a' + b - 10,
+        }
+    }
+
+    let (data, len) = match c {
+        b'\t' => ([b'\\', b't', 0, 0], 2),
+        b'\r' => ([b'\\', b'r', 0, 0], 2),
+        b'\n' => ([b'\\', b'n', 0, 0], 2),
+        b'\\' => ([b'\\', b'\\', 0, 0], 2),
+        b'\'' => ([b'\\', b'\'', 0, 0], 2),
+        b'"' => ([b'\\', b'"', 0, 0], 2),
+        // Printable ASCII passes through unescaped.
+        b'\x20'..=b'\x7e' => ([c, 0, 0, 0], 1),
+        // Everything else becomes a `\xNN` hex escape.
+        _ => ([b'\\', b'x', hexify(c >> 4), hexify(c & 0xf)], 4),
+    };
+
+    EscapeDefault { range: 0..len, data }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeDefault {
+    type Item = u8;
+    fn next(&mut self) -> Option<u8> {
+        // Advance the front of the pending index range and yield that byte.
+        let idx = self.range.next()?;
+        Some(self.data[idx])
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // The inner range knows exactly how many bytes remain.
+        self.range.size_hint()
+    }
+    fn last(mut self) -> Option<u8> {
+        // The last remaining byte is simply the back of the range.
+        self.next_back()
+    }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl DoubleEndedIterator for EscapeDefault {
+    fn next_back(&mut self) -> Option<u8> {
+        // Consume the back of the pending index range and yield that byte.
+        let idx = self.range.next_back()?;
+        Some(self.data[idx])
+    }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+// Exact length is known: `size_hint` delegates to the inner `Range<usize>`,
+// whose bounds are precise.
+impl ExactSizeIterator for EscapeDefault {}
+#[stable(feature = "fused", since = "1.26.0")]
+// Once the inner `Range` is exhausted it keeps returning `None`, so the
+// iterator is fused.
+impl FusedIterator for EscapeDefault {}
+
+#[stable(feature = "ascii_escape_display", since = "1.39.0")]
+impl fmt::Display for EscapeDefault {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let remaining = &self.data[self.range.clone()];
+        // SAFETY: `escape_default` only ever stores ASCII bytes in `data`,
+        // and ASCII is always valid UTF-8.
+        f.write_str(unsafe { from_utf8_unchecked(remaining) })
+    }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for EscapeDefault {
+    // Deliberately opaque: `range` and `data` are implementation details.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad("EscapeDefault { .. }")
+    }
+}
--- /dev/null
+//! impl bool {}
+
+#[lang = "bool"]
+impl bool {
+    /// Returns `Some(t)` if the `bool` is `true`, or `None` otherwise.
+    ///
+    /// Note that `t` is evaluated eagerly at the call site; use `then` with
+    /// a closure when the value is expensive to compute.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(bool_to_option)]
+    ///
+    /// assert_eq!(false.then_some(0), None);
+    /// assert_eq!(true.then_some(0), Some(0));
+    /// ```
+    #[unstable(feature = "bool_to_option", issue = "64260")]
+    #[inline]
+    pub fn then_some<T>(self, t: T) -> Option<T> {
+        match self {
+            true => Some(t),
+            false => None,
+        }
+    }
+
+    /// Returns `Some(f())` if the `bool` is `true`, or `None` otherwise.
+    ///
+    /// The closure is only invoked when `self` is `true`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(bool_to_option)]
+    ///
+    /// assert_eq!(false.then(|| 0), None);
+    /// assert_eq!(true.then(|| 0), Some(0));
+    /// ```
+    #[unstable(feature = "bool_to_option", issue = "64260")]
+    #[inline]
+    pub fn then<T, F: FnOnce() -> T>(self, f: F) -> Option<T> {
+        match self {
+            true => Some(f()),
+            false => None,
+        }
+    }
+}
--- /dev/null
+//! A module for working with borrowed data.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+/// A trait for borrowing data.
+///
+/// In Rust, it is common to provide different representations of a type for
+/// different use cases. For instance, storage location and management for a
+/// value can be specifically chosen as appropriate for a particular use via
+/// pointer types such as [`Box<T>`] or [`Rc<T>`]. Beyond these generic
+/// wrappers that can be used with any type, some types provide optional
+/// facets providing potentially costly functionality. An example for such a
+/// type is [`String`] which adds the ability to extend a string to the basic
+/// [`str`]. This requires keeping additional information unnecessary for a
+/// simple, immutable string.
+///
+/// These types provide access to the underlying data through references
+/// to the type of that data. They are said to be ‘borrowed as’ that type.
+/// For instance, a [`Box<T>`] can be borrowed as `T` while a [`String`]
+/// can be borrowed as `str`.
+///
+/// Types express that they can be borrowed as some type `T` by implementing
+/// `Borrow<T>`, providing a reference to a `T` in the trait’s
+/// [`borrow`] method. A type is free to borrow as several different types.
+/// If it wishes to mutably borrow as the type – allowing the underlying data
+/// to be modified – it can additionally implement [`BorrowMut<T>`].
+///
+/// Further, when providing implementations for additional traits, it needs
+/// to be considered whether they should behave identical to those of the
+/// underlying type as a consequence of acting as a representation of that
+/// underlying type. Generic code typically uses `Borrow<T>` when it relies
+/// on the identical behavior of these additional trait implementations.
+/// These traits will likely appear as additional trait bounds.
+///
+/// In particular `Eq`, `Ord` and `Hash` must be equivalent for
+/// borrowed and owned values: `x.borrow() == y.borrow()` should give the
+/// same result as `x == y`.
+///
+/// If generic code merely needs to work for all types that can
+/// provide a reference to related type `T`, it is often better to use
+/// [`AsRef<T>`] as more types can safely implement it.
+///
+/// [`BorrowMut<T>`]: BorrowMut
+/// [`Box<T>`]: ../../std/boxed/struct.Box.html
+/// [`Mutex<T>`]: ../../std/sync/struct.Mutex.html
+/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
+/// [`String`]: ../../std/string/struct.String.html
+/// [`borrow`]: Borrow::borrow
+///
+/// # Examples
+///
+/// As a data collection, [`HashMap<K, V>`] owns both keys and values. If
+/// the key’s actual data is wrapped in a managing type of some kind, it
+/// should, however, still be possible to search for a value using a
+/// reference to the key’s data. For instance, if the key is a string, then
+/// it is likely stored with the hash map as a [`String`], while it should
+/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to
+/// operate on a `String` while `get` needs to be able to use a `&str`.
+///
+/// Slightly simplified, the relevant parts of `HashMap<K, V>` look like
+/// this:
+///
+/// ```
+/// use std::borrow::Borrow;
+/// use std::hash::Hash;
+///
+/// pub struct HashMap<K, V> {
+/// # marker: ::std::marker::PhantomData<(K, V)>,
+/// // fields omitted
+/// }
+///
+/// impl<K, V> HashMap<K, V> {
+/// pub fn insert(&self, key: K, value: V) -> Option<V>
+/// where K: Hash + Eq
+/// {
+/// # unimplemented!()
+/// // ...
+/// }
+///
+/// pub fn get<Q>(&self, k: &Q) -> Option<&V>
+/// where
+/// K: Borrow<Q>,
+/// Q: Hash + Eq + ?Sized
+/// {
+/// # unimplemented!()
+/// // ...
+/// }
+/// }
+/// ```
+///
+/// The entire hash map is generic over a key type `K`. Because these keys
+/// are stored with the hash map, this type has to own the key’s data.
+/// When inserting a key-value pair, the map is given such a `K` and needs
+/// to find the correct hash bucket and check if the key is already present
+/// based on that `K`. It therefore requires `K: Hash + Eq`.
+///
+/// When searching for a value in the map, however, having to provide a
+/// reference to a `K` as the key to search for would require to always
+/// create such an owned value. For string keys, this would mean a `String`
+/// value needs to be created just for the search for cases where only a
+/// `str` is available.
+///
+/// Instead, the `get` method is generic over the type of the underlying key
+/// data, called `Q` in the method signature above. It states that `K`
+/// borrows as a `Q` by requiring that `K: Borrow<Q>`. By additionally
+/// requiring `Q: Hash + Eq`, it signals the requirement that `K` and `Q`
+/// have implementations of the `Hash` and `Eq` traits that produce identical
+/// results.
+///
+/// The implementation of `get` relies in particular on identical
+/// implementations of `Hash` by determining the key’s hash bucket by calling
+/// `Hash::hash` on the `Q` value even though it inserted the key based on
+/// the hash value calculated from the `K` value.
+///
+/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value
+/// produces a different hash than `Q`. For instance, imagine you have a
+/// type that wraps a string but compares ASCII letters ignoring their case:
+///
+/// ```
+/// pub struct CaseInsensitiveString(String);
+///
+/// impl PartialEq for CaseInsensitiveString {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.0.eq_ignore_ascii_case(&other.0)
+/// }
+/// }
+///
+/// impl Eq for CaseInsensitiveString { }
+/// ```
+///
+/// Because two equal values need to produce the same hash value, the
+/// implementation of `Hash` needs to ignore ASCII case, too:
+///
+/// ```
+/// # use std::hash::{Hash, Hasher};
+/// # pub struct CaseInsensitiveString(String);
+/// impl Hash for CaseInsensitiveString {
+/// fn hash<H: Hasher>(&self, state: &mut H) {
+/// for c in self.0.as_bytes() {
+/// c.to_ascii_lowercase().hash(state)
+/// }
+/// }
+/// }
+/// ```
+///
+/// Can `CaseInsensitiveString` implement `Borrow<str>`? It certainly can
+/// provide a reference to a string slice via its contained owned string.
+/// But because its `Hash` implementation differs, it behaves differently
+/// from `str` and therefore must not, in fact, implement `Borrow<str>`.
+/// If it wants to allow others access to the underlying `str`, it can do
+/// that via `AsRef<str>` which doesn’t carry any extra requirements.
+///
+/// [`Hash`]: crate::hash::Hash
+/// [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
+/// [`String`]: ../../std/string/struct.String.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Borrow<Borrowed: ?Sized> {
+    /// Immutably borrows from an owned value.
+    ///
+    /// Implementations are expected to keep `Eq`, `Ord` and `Hash` consistent
+    /// between the owned value and its borrowed form; see the trait-level
+    /// documentation above.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::borrow::Borrow;
+    ///
+    /// fn check<T: Borrow<str>>(s: T) {
+    ///     assert_eq!("Hello", s.borrow());
+    /// }
+    ///
+    /// let s = "Hello".to_string();
+    ///
+    /// check(s);
+    ///
+    /// let s = "Hello";
+    ///
+    /// check(s);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn borrow(&self) -> &Borrowed;
+}
+
+/// A trait for mutably borrowing data.
+///
+/// As a companion to [`Borrow<T>`] this trait allows a type to borrow as
+/// an underlying type by providing a mutable reference. See [`Borrow<T>`]
+/// for more information on borrowing as another type.
+///
+/// [`Borrow<T>`]: Borrow
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait BorrowMut<Borrowed: ?Sized>: Borrow<Borrowed> {
+    /// Mutably borrows from an owned value.
+    ///
+    /// The `Borrow<Borrowed>` supertrait bound guarantees that a matching
+    /// immutable borrow is available as well.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::borrow::BorrowMut;
+    ///
+    /// fn check<T: BorrowMut<[i32]>>(mut v: T) {
+    ///     assert_eq!(&mut [1, 2, 3], v.borrow_mut());
+    /// }
+    ///
+    /// let v = vec![1, 2, 3];
+    ///
+    /// check(v);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn borrow_mut(&mut self) -> &mut Borrowed;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Borrow<T> for T {
+    /// Identity: every type borrows as itself.
+    fn borrow(&self) -> &T {
+        self
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> BorrowMut<T> for T {
+    /// Identity: every type mutably borrows as itself.
+    fn borrow_mut(&mut self) -> &mut T {
+        self
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Borrow<T> for &T {
+    /// A shared reference borrows as its referent.
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Borrow<T> for &mut T {
+    /// A mutable reference immutably borrows as its referent.
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> BorrowMut<T> for &mut T {
+    /// A mutable reference mutably borrows as its referent.
+    fn borrow_mut(&mut self) -> &mut T {
+        &mut **self
+    }
+}
--- /dev/null
+//! Shareable mutable containers.
+//!
+//! Rust memory safety is based on this rule: Given an object `T`, it is only possible to
+//! have one of the following:
+//!
+//! - Having several immutable references (`&T`) to the object (also known as **aliasing**).
+//! - Having one mutable reference (`&mut T`) to the object (also known as **mutability**).
+//!
+//! This is enforced by the Rust compiler. However, there are situations where this rule is not
+//! flexible enough. Sometimes it is required to have multiple references to an object and yet
+//! mutate it.
+//!
+//! Shareable mutable containers exist to permit mutability in a controlled manner, even in the
+//! presence of aliasing. Both `Cell<T>` and `RefCell<T>` allow doing this in a single-threaded
+//! way. However, neither `Cell<T>` nor `RefCell<T>` are thread safe (they do not implement
+//! `Sync`). If you need to do aliasing and mutation between multiple threads it is possible to
+//! use [`Mutex`](../../std/sync/struct.Mutex.html),
+//! [`RwLock`](../../std/sync/struct.RwLock.html) or
+//! [`atomic`](../../core/sync/atomic/index.html) types.
+//!
+//! Values of the `Cell<T>` and `RefCell<T>` types may be mutated through shared references (i.e.
+//! the common `&T` type), whereas most Rust types can only be mutated through unique (`&mut T`)
+//! references. We say that `Cell<T>` and `RefCell<T>` provide 'interior mutability', in contrast
+//! with typical Rust types that exhibit 'inherited mutability'.
+//!
+//! Cell types come in two flavors: `Cell<T>` and `RefCell<T>`. `Cell<T>` implements interior
+//! mutability by moving values in and out of the `Cell<T>`. To use references instead of values,
+//! one must use the `RefCell<T>` type, acquiring a write lock before mutating. `Cell<T>` provides
+//! methods to retrieve and change the current interior value:
+//!
+//! - For types that implement `Copy`, the `get` method retrieves the current interior value.
+//! - For types that implement `Default`, the `take` method replaces the current interior value
+//! with `Default::default()` and returns the replaced value.
+//! - For all types, the `replace` method replaces the current interior value and returns the
+//! replaced value and the `into_inner` method consumes the `Cell<T>` and returns the interior
+//! value. Additionally, the `set` method replaces the interior value, dropping the replaced
+//! value.
+//!
+//! `RefCell<T>` uses Rust's lifetimes to implement 'dynamic borrowing', a process whereby one can
+//! claim temporary, exclusive, mutable access to the inner value. Borrows for `RefCell<T>`s are
+//! tracked 'at runtime', unlike Rust's native reference types which are entirely tracked
+//! statically, at compile time. Because `RefCell<T>` borrows are dynamic it is possible to attempt
+//! to borrow a value that is already mutably borrowed; when this happens it
+//! results in a panic.
+//!
+//! # When to choose interior mutability
+//!
+//! The more common inherited mutability, where one must have unique access to mutate a value, is
+//! one of the key language elements that enables Rust to reason strongly about pointer aliasing,
+//! statically preventing crash bugs. Because of that, inherited mutability is preferred, and
+//! interior mutability is something of a last resort. Since cell types enable mutation where it
+//! would otherwise be disallowed though, there are occasions when interior mutability might be
+//! appropriate, or even *must* be used, e.g.
+//!
+//! * Introducing mutability 'inside' of something immutable
+//! * Implementation details of logically-immutable methods.
+//! * Mutating implementations of `Clone`.
+//!
+//! ## Introducing mutability 'inside' of something immutable
+//!
+//! Many shared smart pointer types, including `Rc<T>` and `Arc<T>`, provide containers that can be
+//! cloned and shared between multiple parties. Because the contained values may be
+//! multiply-aliased, they can only be borrowed with `&`, not `&mut`. Without cells it would be
+//! impossible to mutate data inside of these smart pointers at all.
+//!
+//! It's very common then to put a `RefCell<T>` inside shared pointer types to reintroduce
+//! mutability:
+//!
+//! ```
+//! use std::cell::{RefCell, RefMut};
+//! use std::collections::HashMap;
+//! use std::rc::Rc;
+//!
+//! fn main() {
+//! let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
+//! // Create a new block to limit the scope of the dynamic borrow
+//! {
+//! let mut map: RefMut<_> = shared_map.borrow_mut();
+//! map.insert("africa", 92388);
+//! map.insert("kyoto", 11837);
+//! map.insert("piccadilly", 11826);
+//! map.insert("marbles", 38);
+//! }
+//!
+//! // Note that if we had not let the previous borrow of the cache fall out
+//! // of scope then the subsequent borrow would cause a dynamic thread panic.
+//! // This is the major hazard of using `RefCell`.
+//! let total: i32 = shared_map.borrow().values().sum();
+//! println!("{}", total);
+//! }
+//! ```
+//!
+//! Note that this example uses `Rc<T>` and not `Arc<T>`. `RefCell<T>`s are for single-threaded
+//! scenarios. Consider using `RwLock<T>` or `Mutex<T>` if you need shared mutability in a
+//! multi-threaded situation.
+//!
+//! ## Implementation details of logically-immutable methods
+//!
+//! Occasionally it may be desirable not to expose in an API that there is mutation happening
+//! "under the hood". This may be because logically the operation is immutable, but e.g., caching
+//! forces the implementation to perform mutation; or because you must employ mutation to implement
+//! a trait method that was originally defined to take `&self`.
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::cell::RefCell;
+//!
+//! struct Graph {
+//! edges: Vec<(i32, i32)>,
+//! span_tree_cache: RefCell<Option<Vec<(i32, i32)>>>
+//! }
+//!
+//! impl Graph {
+//! fn minimum_spanning_tree(&self) -> Vec<(i32, i32)> {
+//! self.span_tree_cache.borrow_mut()
+//! .get_or_insert_with(|| self.calc_span_tree())
+//! .clone()
+//! }
+//!
+//! fn calc_span_tree(&self) -> Vec<(i32, i32)> {
+//! // Expensive computation goes here
+//! vec![]
+//! }
+//! }
+//! ```
+//!
+//! ## Mutating implementations of `Clone`
+//!
+//! This is simply a special - but common - case of the previous: hiding mutability for operations
+//! that appear to be immutable. The `clone` method is expected to not change the source value, and
+//! is declared to take `&self`, not `&mut self`. Therefore, any mutation that happens in the
+//! `clone` method must use cell types. For example, `Rc<T>` maintains its reference counts within a
+//! `Cell<T>`.
+//!
+//! ```
+//! use std::cell::Cell;
+//! use std::ptr::NonNull;
+//! use std::process::abort;
+//! use std::marker::PhantomData;
+//!
+//! struct Rc<T: ?Sized> {
+//! ptr: NonNull<RcBox<T>>,
+//! phantom: PhantomData<RcBox<T>>,
+//! }
+//!
+//! struct RcBox<T: ?Sized> {
+//! strong: Cell<usize>,
+//! refcount: Cell<usize>,
+//! value: T,
+//! }
+//!
+//! impl<T: ?Sized> Clone for Rc<T> {
+//! fn clone(&self) -> Rc<T> {
+//! self.inc_strong();
+//! Rc {
+//! ptr: self.ptr,
+//! phantom: PhantomData,
+//! }
+//! }
+//! }
+//!
+//! trait RcBoxPtr<T: ?Sized> {
+//!
+//! fn inner(&self) -> &RcBox<T>;
+//!
+//! fn strong(&self) -> usize {
+//! self.inner().strong.get()
+//! }
+//!
+//! fn inc_strong(&self) {
+//! self.inner()
+//! .strong
+//! .set(self.strong()
+//! .checked_add(1)
+//! .unwrap_or_else(|| abort() ));
+//! }
+//! }
+//!
+//! impl<T: ?Sized> RcBoxPtr<T> for Rc<T> {
+//! fn inner(&self) -> &RcBox<T> {
+//! unsafe {
+//! self.ptr.as_ref()
+//! }
+//! }
+//! }
+//! ```
+//!
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering;
+use crate::fmt::{self, Debug, Display};
+use crate::marker::Unsize;
+use crate::mem;
+use crate::ops::{CoerceUnsized, Deref, DerefMut};
+use crate::ptr;
+
+/// A mutable memory location.
+///
+/// # Examples
+///
+/// In this example, you can see that `Cell<T>` enables mutation inside an
+/// immutable struct. In other words, it enables "interior mutability".
+///
+/// ```
+/// use std::cell::Cell;
+///
+/// struct SomeStruct {
+/// regular_field: u8,
+/// special_field: Cell<u8>,
+/// }
+///
+/// let my_struct = SomeStruct {
+/// regular_field: 0,
+/// special_field: Cell::new(1),
+/// };
+///
+/// let new_value = 100;
+///
+/// // ERROR: `my_struct` is immutable
+/// // my_struct.regular_field = new_value;
+///
+/// // WORKS: although `my_struct` is immutable, `special_field` is a `Cell`,
+/// // which can always be mutated
+/// my_struct.special_field.set(new_value);
+/// assert_eq!(my_struct.special_field.get(), new_value);
+/// ```
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(transparent)]
+pub struct Cell<T: ?Sized> {
+    // The sole field. `repr(transparent)` guarantees `Cell<T>` has the same
+    // memory layout as `T`, which the pointer casts in `from_mut` and
+    // `as_slice_of_cells` rely on.
+    value: UnsafeCell<T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// SAFETY: sending the `Cell` moves its contents along with it, and `Cell` is
+// `!Sync`, so at any moment only one thread can access the value — sending a
+// `Cell<T>` is therefore no more dangerous than sending a bare `T`.
+unsafe impl<T: ?Sized> Send for Cell<T> where T: Send {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// `Cell` allows unsynchronized mutation through a shared `&Cell<T>`, so
+// sharing one across threads would be a data race; hence explicitly `!Sync`.
+impl<T: ?Sized> !Sync for Cell<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Copy> Clone for Cell<T> {
+    /// Returns a new `Cell` holding a copy of the current value.
+    #[inline]
+    fn clone(&self) -> Cell<T> {
+        let current = self.get();
+        Cell::new(current)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Cell<T> {
+    /// Creates a `Cell<T>` holding `T`'s default value.
+    #[inline]
+    fn default() -> Cell<T> {
+        Cell::new(T::default())
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq + Copy> PartialEq for Cell<T> {
+    /// Compares the copied-out contents of the two cells.
+    #[inline]
+    fn eq(&self, other: &Cell<T>) -> bool {
+        self.get().eq(&other.get())
+    }
+}
+
+#[stable(feature = "cell_eq", since = "1.2.0")]
+// Equality is total whenever `T`'s is; comparison itself is the `PartialEq`
+// impl above.
+impl<T: Eq + Copy> Eq for Cell<T> {}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: PartialOrd + Copy> PartialOrd for Cell<T> {
+    #[inline]
+    fn partial_cmp(&self, other: &Cell<T>) -> Option<Ordering> {
+        let (a, b) = (self.get(), other.get());
+        a.partial_cmp(&b)
+    }
+
+    // Each comparison operator is forwarded directly to `T`'s corresponding
+    // operator rather than relying on the defaults derived from
+    // `partial_cmp`.
+    #[inline]
+    fn lt(&self, other: &Cell<T>) -> bool {
+        self.get().lt(&other.get())
+    }
+
+    #[inline]
+    fn le(&self, other: &Cell<T>) -> bool {
+        self.get().le(&other.get())
+    }
+
+    #[inline]
+    fn gt(&self, other: &Cell<T>) -> bool {
+        self.get().gt(&other.get())
+    }
+
+    #[inline]
+    fn ge(&self, other: &Cell<T>) -> bool {
+        self.get().ge(&other.get())
+    }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: Ord + Copy> Ord for Cell<T> {
+    /// Orders the copied-out contents of the two cells.
+    #[inline]
+    fn cmp(&self, other: &Cell<T>) -> Ordering {
+        let (a, b) = (self.get(), other.get());
+        a.cmp(&b)
+    }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+impl<T> From<T> for Cell<T> {
+    /// Wraps `t` in a new `Cell`.
+    fn from(t: T) -> Cell<T> {
+        Cell::new(t)
+    }
+}
+
+impl<T> Cell<T> {
+    /// Creates a new `Cell` containing the given value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_stable(feature = "const_cell_new", since = "1.32.0")]
+    #[inline]
+    pub const fn new(value: T) -> Cell<T> {
+        Cell { value: UnsafeCell::new(value) }
+    }
+
+    /// Sets the contained value, dropping the previous one.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    ///
+    /// c.set(10);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn set(&self, val: T) {
+        // Move the new value in and immediately drop whatever was there.
+        drop(self.replace(val));
+    }
+
+    /// Swaps the values of two `Cell`s.
+    ///
+    /// Unlike `std::mem::swap`, this does not require `&mut` references.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c1 = Cell::new(5i32);
+    /// let c2 = Cell::new(10i32);
+    /// c1.swap(&c2);
+    /// assert_eq!(10, c1.get());
+    /// assert_eq!(5, c2.get());
+    /// ```
+    #[inline]
+    #[stable(feature = "move_cell", since = "1.17.0")]
+    pub fn swap(&self, other: &Self) {
+        // Swapping a cell with itself is a no-op, and we must not call
+        // `ptr::swap` with two pointers to the same location.
+        if !ptr::eq(self, other) {
+            // SAFETY: `Cell` is `!Sync`, so the two cells cannot be touched
+            // from another thread while we swap, and `Cell` never hands out
+            // references into its interior, so nothing else points into
+            // either cell.
+            unsafe {
+                ptr::swap(self.value.get(), other.value.get());
+            }
+        }
+    }
+
+    /// Replaces the contained value, and returns the old one.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let cell = Cell::new(5);
+    /// assert_eq!(cell.get(), 5);
+    /// assert_eq!(cell.replace(10), 5);
+    /// assert_eq!(cell.get(), 10);
+    /// ```
+    #[stable(feature = "move_cell", since = "1.17.0")]
+    pub fn replace(&self, val: T) -> T {
+        // SAFETY: `Cell` is `!Sync`, so no other thread can race on this
+        // slot while we move the old value out and the new one in.
+        let slot = unsafe { &mut *self.value.get() };
+        mem::replace(slot, val)
+    }
+
+    /// Unwraps the value, consuming the cell.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    /// let five = c.into_inner();
+    ///
+    /// assert_eq!(five, 5);
+    /// ```
+    #[stable(feature = "move_cell", since = "1.17.0")]
+    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+    pub const fn into_inner(self) -> T {
+        self.value.into_inner()
+    }
+}
+
+impl<T: Copy> Cell<T> {
+    /// Returns a copy of the contained value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    ///
+    /// let five = c.get();
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn get(&self) -> T {
+        // SAFETY: a data race is impossible because `Cell` is `!Sync`, and
+        // `T: Copy` lets us read the value out without moving it.
+        unsafe { *self.value.get() }
+    }
+
+    /// Updates the contained value using a function and returns the new value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(cell_update)]
+    ///
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    /// let new = c.update(|x| x + 1);
+    ///
+    /// assert_eq!(new, 6);
+    /// assert_eq!(c.get(), 6);
+    /// ```
+    #[inline]
+    #[unstable(feature = "cell_update", issue = "50186")]
+    pub fn update<F>(&self, f: F) -> T
+    where
+        F: FnOnce(T) -> T,
+    {
+        let new = f(self.get());
+        self.set(new);
+        new
+    }
+}
+
+impl<T: ?Sized> Cell<T> {
+    /// Returns a raw pointer to the underlying data in this cell.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    ///
+    /// let ptr = c.as_ptr();
+    /// ```
+    #[inline]
+    #[stable(feature = "cell_as_ptr", since = "1.12.0")]
+    #[rustc_const_stable(feature = "const_cell_as_ptr", since = "1.32.0")]
+    pub const fn as_ptr(&self) -> *mut T {
+        self.value.get()
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This call borrows `Cell` mutably (at compile-time) which guarantees
+    /// that we possess the only reference.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let mut c = Cell::new(5);
+    /// *c.get_mut() += 1;
+    ///
+    /// assert_eq!(c.get(), 6);
+    /// ```
+    #[inline]
+    #[stable(feature = "cell_get_mut", since = "1.11.0")]
+    pub fn get_mut(&mut self) -> &mut T {
+        self.value.get_mut()
+    }
+
+    /// Returns a `&Cell<T>` from a `&mut T`
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let slice: &mut [i32] = &mut [1, 2, 3];
+    /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice);
+    /// let slice_cell: &[Cell<i32>] = cell_slice.as_slice_of_cells();
+    ///
+    /// assert_eq!(slice_cell.len(), 3);
+    /// ```
+    #[inline]
+    #[stable(feature = "as_cell", since = "1.37.0")]
+    pub fn from_mut(t: &mut T) -> &Cell<T> {
+        // SAFETY: `&mut` ensures unique access, and `#[repr(transparent)]`
+        // gives `Cell<T>` the same memory layout as `T`, making the pointer
+        // cast valid.
+        unsafe { &*(t as *mut T as *const Cell<T>) }
+    }
+}
+
+impl<T: Default> Cell<T> {
+    /// Takes the value of the cell, leaving `Default::default()` in its place.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let c = Cell::new(5);
+    /// let five = c.take();
+    ///
+    /// assert_eq!(five, 5);
+    /// assert_eq!(c.into_inner(), 0);
+    /// ```
+    #[stable(feature = "move_cell", since = "1.17.0")]
+    pub fn take(&self) -> T {
+        // Swap the default value in and hand the previous contents back.
+        self.replace(T::default())
+    }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+// Mirrors the inner `T: CoerceUnsized<U>` so that, e.g., a pointer to
+// `Cell<[T; N]>` can coerce to a pointer to `Cell<[T]>`.
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Cell<U>> for Cell<T> {}
+
+impl<T> Cell<[T]> {
+    /// Returns a `&[Cell<T>]` from a `&Cell<[T]>`
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::Cell;
+    ///
+    /// let slice: &mut [i32] = &mut [1, 2, 3];
+    /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice);
+    /// let slice_cell: &[Cell<i32>] = cell_slice.as_slice_of_cells();
+    ///
+    /// assert_eq!(slice_cell.len(), 3);
+    /// ```
+    #[stable(feature = "as_cell", since = "1.37.0")]
+    pub fn as_slice_of_cells(&self) -> &[Cell<T>] {
+        let ptr = self as *const Cell<[T]> as *const [Cell<T>];
+        // SAFETY: `Cell<T>` is `repr(transparent)` over `T`, so a slice of
+        // `T` and a slice of `Cell<T>` have identical memory layout.
+        unsafe { &*ptr }
+    }
+}
+
+/// A mutable memory location with dynamically checked borrow rules
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RefCell<T: ?Sized> {
+    // Runtime borrow state; see the `BorrowFlag` comment below: positive
+    // values count active `Ref`s, negative values count active `RefMut`s,
+    // and `UNUSED` (0) means unborrowed.
+    borrow: Cell<BorrowFlag>,
+    // The wrapped value; kept last because `T` may be unsized.
+    value: UnsafeCell<T>,
+}
+
+/// An error returned by [`RefCell::try_borrow`].
+#[stable(feature = "try_borrow", since = "1.13.0")]
+pub struct BorrowError {
+    // Private field prevents construction outside this module.
+    _private: (),
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowError {
+    // Renders as `BorrowError`; there are no fields worth showing.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BorrowError").finish()
+    }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Delegate to `str`'s `Display` impl so formatter flags (width,
+        // alignment) are respected.
+        Display::fmt("already mutably borrowed", f)
+    }
+}
+
+/// An error returned by [`RefCell::try_borrow_mut`].
+#[stable(feature = "try_borrow", since = "1.13.0")]
+pub struct BorrowMutError {
+    // Private field prevents construction outside this module.
+    _private: (),
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowMutError {
+    // Renders as `BorrowMutError`; there are no fields worth showing.
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BorrowMutError").finish()
+    }
+}
+
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowMutError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Delegate to `str`'s `Display` impl so formatter flags (width,
+        // alignment) are respected.
+        Display::fmt("already borrowed", f)
+    }
+}
+
+// Positive values represent the number of `Ref` active. Negative values
+// represent the number of `RefMut` active. Multiple `RefMut`s can only be
+// active at a time if they refer to distinct, nonoverlapping components of a
+// `RefCell` (e.g., different ranges of a slice).
+//
+// `Ref` and `RefMut` are both two words in size, and so there will likely never
+// be enough `Ref`s or `RefMut`s in existence to overflow half of the `usize`
+// range. Thus, a `BorrowFlag` will probably never overflow or underflow.
+// However, this is not a guarantee, as a pathological program could repeatedly
+// create and then mem::forget `Ref`s or `RefMut`s. Thus, all code must
+// explicitly check for overflow and underflow in order to avoid unsafety, or at
+// least behave correctly in the event that overflow or underflow happens (e.g.,
+// see BorrowRef::new).
+type BorrowFlag = isize;
+const UNUSED: BorrowFlag = 0;
+
+// Returns `true` if the flag records at least one outstanding `RefMut`.
+#[inline(always)]
+fn is_writing(x: BorrowFlag) -> bool {
+ x < UNUSED
+}
+
+// Returns `true` if the flag records at least one outstanding `Ref`.
+#[inline(always)]
+fn is_reading(x: BorrowFlag) -> bool {
+ x > UNUSED
+}
+
+impl<T> RefCell<T> {
+ /// Creates a new `RefCell` containing `value`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_refcell_new", since = "1.32.0")]
+ #[inline]
+ pub const fn new(value: T) -> RefCell<T> {
+ RefCell { value: UnsafeCell::new(value), borrow: Cell::new(UNUSED) }
+ }
+
+ /// Consumes the `RefCell`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let five = c.into_inner();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ #[inline]
+ pub const fn into_inner(self) -> T {
+ // Since this function takes `self` (the `RefCell`) by value, the
+ // compiler statically verifies that it is not currently borrowed.
+ self.value.into_inner()
+ }
+
+ /// Replaces the wrapped value with a new one, returning the old value,
+ /// without deinitializing either one.
+ ///
+ /// This function corresponds to [`std::mem::replace`](../mem/fn.replace.html).
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let cell = RefCell::new(5);
+ /// let old_value = cell.replace(6);
+ /// assert_eq!(old_value, 5);
+ /// assert_eq!(cell, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_replace", since = "1.24.0")]
+ #[track_caller]
+ pub fn replace(&self, t: T) -> T {
+ // `borrow_mut` performs the runtime borrow check (panicking if any
+ // borrow is active); `mem::replace` then swaps `t` in.
+ mem::replace(&mut *self.borrow_mut(), t)
+ }
+
+ /// Replaces the wrapped value with a new one computed from `f`, returning
+ /// the old value, without deinitializing either one.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let cell = RefCell::new(5);
+ /// let old_value = cell.replace_with(|&mut old| old + 1);
+ /// assert_eq!(old_value, 5);
+ /// assert_eq!(cell, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_replace_swap", since = "1.35.0")]
+ #[track_caller]
+ pub fn replace_with<F: FnOnce(&mut T) -> T>(&self, f: F) -> T {
+ // Hold a single mutable borrow across both the call to `f` and the
+ // replacement, so `f` itself cannot re-borrow this cell.
+ let mut_borrow = &mut *self.borrow_mut();
+ let replacement = f(mut_borrow);
+ mem::replace(mut_borrow, replacement)
+ }
+
+ /// Swaps the wrapped value of `self` with the wrapped value of `other`,
+ /// without deinitializing either one.
+ ///
+ /// This function corresponds to [`std::mem::swap`](../mem/fn.swap.html).
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed, or
+ /// if `self` and `other` point to the same `RefCell` (the second
+ /// `borrow_mut` below would then fail).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ /// let c = RefCell::new(5);
+ /// let d = RefCell::new(6);
+ /// c.swap(&d);
+ /// assert_eq!(c, RefCell::new(6));
+ /// assert_eq!(d, RefCell::new(5));
+ /// ```
+ #[inline]
+ #[stable(feature = "refcell_swap", since = "1.24.0")]
+ pub fn swap(&self, other: &Self) {
+ mem::swap(&mut *self.borrow_mut(), &mut *other.borrow_mut())
+ }
+}
+
+impl<T: ?Sized> RefCell<T> {
+ /// Immutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `Ref` exits scope. Multiple
+ /// immutable borrows can be taken out at the same time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed. For a non-panicking variant, use
+ /// [`try_borrow`](#method.try_borrow).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let borrowed_five = c.borrow();
+ /// let borrowed_five2 = c.borrow();
+ /// ```
+ ///
+ /// An example of panic:
+ ///
+ /// ```should_panic
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let m = c.borrow_mut();
+ /// let b = c.borrow(); // this causes a panic
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ pub fn borrow(&self) -> Ref<'_, T> {
+ // Panic message matches `BorrowError`'s `Display` output.
+ self.try_borrow().expect("already mutably borrowed")
+ }
+
+ /// Immutably borrows the wrapped value, returning an error if the value is currently mutably
+ /// borrowed.
+ ///
+ /// The borrow lasts until the returned `Ref` exits scope. Multiple immutable borrows can be
+ /// taken out at the same time.
+ ///
+ /// This is the non-panicking variant of [`borrow`](#method.borrow).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow_mut();
+ /// assert!(c.try_borrow().is_err());
+ /// }
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(c.try_borrow().is_ok());
+ /// }
+ /// ```
+ #[stable(feature = "try_borrow", since = "1.13.0")]
+ #[inline]
+ pub fn try_borrow(&self) -> Result<Ref<'_, T>, BorrowError> {
+ match BorrowRef::new(&self.borrow) {
+ // SAFETY: `BorrowRef` ensures that there is only immutable access
+ // to the value while borrowed.
+ Some(b) => Ok(Ref { value: unsafe { &*self.value.get() }, borrow: b }),
+ None => Err(BorrowError { _private: () }),
+ }
+ }
+
+ /// Mutably borrows the wrapped value.
+ ///
+ /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived
+ /// from it exit scope. The value cannot be borrowed while this borrow is
+ /// active.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed. For a non-panicking variant, use
+ /// [`try_borrow_mut`](#method.try_borrow_mut).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new("hello".to_owned());
+ ///
+ /// *c.borrow_mut() = "bonjour".to_owned();
+ ///
+ /// assert_eq!(&*c.borrow(), "bonjour");
+ /// ```
+ ///
+ /// An example of panic:
+ ///
+ /// ```should_panic
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// let m = c.borrow();
+ ///
+ /// let b = c.borrow_mut(); // this causes a panic
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ #[track_caller]
+ pub fn borrow_mut(&self) -> RefMut<'_, T> {
+ // Panic message matches `BorrowMutError`'s `Display` output.
+ self.try_borrow_mut().expect("already borrowed")
+ }
+
+ /// Mutably borrows the wrapped value, returning an error if the value is currently borrowed.
+ ///
+ /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived
+ /// from it exit scope. The value cannot be borrowed while this borrow is
+ /// active.
+ ///
+ /// This is the non-panicking variant of [`borrow_mut`](#method.borrow_mut).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(c.try_borrow_mut().is_err());
+ /// }
+ ///
+ /// assert!(c.try_borrow_mut().is_ok());
+ /// ```
+ #[stable(feature = "try_borrow", since = "1.13.0")]
+ #[inline]
+ pub fn try_borrow_mut(&self) -> Result<RefMut<'_, T>, BorrowMutError> {
+ match BorrowRefMut::new(&self.borrow) {
+ // SAFETY: `BorrowRefMut` guarantees unique access.
+ Some(b) => Ok(RefMut { value: unsafe { &mut *self.value.get() }, borrow: b }),
+ None => Err(BorrowMutError { _private: () }),
+ }
+ }
+
+ /// Returns a raw pointer to the underlying data in this cell.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// let ptr = c.as_ptr();
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_as_ptr", since = "1.12.0")]
+ pub fn as_ptr(&self) -> *mut T {
+ // Note: does not touch the borrow flag; any dereference of the
+ // returned pointer must uphold the aliasing rules manually.
+ self.value.get()
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// This call borrows `RefCell` mutably (at compile-time) so there is no
+ /// need for dynamic checks.
+ ///
+ /// However be cautious: this method expects `self` to be mutable, which is
+ /// generally not the case when using a `RefCell`. Take a look at the
+ /// [`borrow_mut`] method instead if `self` isn't mutable.
+ ///
+ /// Also, please be aware that this method is only for special circumstances and is usually
+ /// not what you want. In case of doubt, use [`borrow_mut`] instead.
+ ///
+ /// [`borrow_mut`]: #method.borrow_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let mut c = RefCell::new(5);
+ /// *c.get_mut() += 1;
+ ///
+ /// assert_eq!(c, RefCell::new(6));
+ /// ```
+ #[inline]
+ #[stable(feature = "cell_get_mut", since = "1.11.0")]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.value.get_mut()
+ }
+
+ /// Undo the effect of leaked guards on the borrow state of the `RefCell`.
+ ///
+ /// This call is similar to [`get_mut`] but more specialized. It borrows `RefCell` mutably to
+ /// ensure no borrows exist and then resets the state tracking shared borrows. This is relevant
+ /// if some `Ref` or `RefMut` borrows have been leaked.
+ ///
+ /// [`get_mut`]: #method.get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::RefCell;
+ ///
+ /// let mut c = RefCell::new(0);
+ /// std::mem::forget(c.borrow_mut());
+ ///
+ /// assert!(c.try_borrow().is_err());
+ /// c.undo_leak();
+ /// assert!(c.try_borrow().is_ok());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn undo_leak(&mut self) -> &mut T {
+ // Taking `&mut self` proves no `Ref`/`RefMut` guard can still be
+ // alive, so any non-`UNUSED` flag value is stale state left behind
+ // by leaked guards and can be safely cleared.
+ *self.borrow.get_mut() = UNUSED;
+ self.get_mut()
+ }
+
+ /// Immutably borrows the wrapped value, returning an error if the value is
+ /// currently mutably borrowed.
+ ///
+ /// # Safety
+ ///
+ /// Unlike `RefCell::borrow`, this method is unsafe because it does not
+ /// return a `Ref`, thus leaving the borrow flag untouched. Mutably
+ /// borrowing the `RefCell` while the reference returned by this method
+ /// is alive is undefined behaviour.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ ///
+ /// {
+ /// let m = c.borrow_mut();
+ /// assert!(unsafe { c.try_borrow_unguarded() }.is_err());
+ /// }
+ ///
+ /// {
+ /// let m = c.borrow();
+ /// assert!(unsafe { c.try_borrow_unguarded() }.is_ok());
+ /// }
+ /// ```
+ #[stable(feature = "borrow_state", since = "1.37.0")]
+ #[inline]
+ pub unsafe fn try_borrow_unguarded(&self) -> Result<&T, BorrowError> {
+ if !is_writing(self.borrow.get()) {
+ // SAFETY: We check that nobody is actively writing now, but it is
+ // the caller's responsibility to ensure that nobody writes until
+ // the returned reference is no longer in use.
+ // Also, `self.value.get()` refers to the value owned by `self`
+ // and is thus guaranteed to be valid for the lifetime of `self`.
+ Ok(unsafe { &*self.value.get() })
+ } else {
+ Err(BorrowError { _private: () })
+ }
+ }
+}
+
+impl<T: Default> RefCell<T> {
+ /// Takes the wrapped value, leaving `Default::default()` in its place.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is currently borrowed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(refcell_take)]
+ /// use std::cell::RefCell;
+ ///
+ /// let c = RefCell::new(5);
+ /// let five = c.take();
+ ///
+ /// assert_eq!(five, 5);
+ /// assert_eq!(c.into_inner(), 0);
+ /// ```
+ #[unstable(feature = "refcell_take", issue = "71395")]
+ pub fn take(&self) -> T {
+ // `borrow_mut` performs the runtime borrow check (panicking if any
+ // borrow is active); `mem::take` then swaps in `T::default()` and
+ // hands back the previous value.
+ mem::take(&mut *self.borrow_mut())
+ }
+}
+
+// SAFETY: sending a `RefCell` transfers the value together with its borrow
+// flag to the receiving thread; nothing is shared, so `T: Send` suffices.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized> Send for RefCell<T> where T: Send {}
+
+// The borrow flag is a plain `Cell` updated without any synchronization, so
+// sharing a `RefCell` between threads would race; hence never `Sync`.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Sync for RefCell<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value is currently mutably borrowed.
+ #[inline]
+ #[track_caller]
+ fn clone(&self) -> RefCell<T> {
+ // `borrow()` panics if a `RefMut` is active; the clone starts with a
+ // fresh, unborrowed flag.
+ RefCell::new(self.borrow().clone())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for RefCell<T> {
+ /// Creates an unborrowed `RefCell<T>` holding `T`'s default value.
+ #[inline]
+ fn default() -> RefCell<T> {
+ // Build the inner default explicitly, then wrap it.
+ RefCell::new(T::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn eq(&self, other: &RefCell<T>) -> bool {
+ // Takes a shared borrow of each cell for the duration of the comparison.
+ *self.borrow() == *other.borrow()
+ }
+}
+
+#[stable(feature = "cell_eq", since = "1.2.0")]
+impl<T: ?Sized + Eq> Eq for RefCell<T> {}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn partial_cmp(&self, other: &RefCell<T>) -> Option<Ordering> {
+ // Take a shared borrow of each cell (left to right, panicking if one
+ // is mutably borrowed) and delegate to the inner values.
+ PartialOrd::partial_cmp(&*self.borrow(), &*other.borrow())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn lt(&self, other: &RefCell<T>) -> bool {
+ PartialOrd::lt(&*self.borrow(), &*other.borrow())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn le(&self, other: &RefCell<T>) -> bool {
+ PartialOrd::le(&*self.borrow(), &*other.borrow())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn gt(&self, other: &RefCell<T>) -> bool {
+ PartialOrd::gt(&*self.borrow(), &*other.borrow())
+ }
+
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn ge(&self, other: &RefCell<T>) -> bool {
+ PartialOrd::ge(&*self.borrow(), &*other.borrow())
+ }
+}
+
+#[stable(feature = "cell_ord", since = "1.10.0")]
+impl<T: ?Sized + Ord> Ord for RefCell<T> {
+ /// # Panics
+ ///
+ /// Panics if the value in either `RefCell` is currently borrowed.
+ #[inline]
+ fn cmp(&self, other: &RefCell<T>) -> Ordering {
+ // Takes a shared borrow of each cell for the duration of the comparison.
+ self.borrow().cmp(&*other.borrow())
+ }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+impl<T> From<T> for RefCell<T> {
+ /// Wraps `t` in a new, unborrowed `RefCell`.
+ fn from(t: T) -> RefCell<T> {
+ RefCell::new(t)
+ }
+}
+
+// Allows e.g. `RefCell<[T; N]>` to coerce to `RefCell<[T]>`.
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<RefCell<U>> for RefCell<T> {}
+
+// RAII-style guard for a single shared borrow: construction increments the
+// cell's borrow flag and `Drop` decrements it.
+struct BorrowRef<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl<'b> BorrowRef<'b> {
+ // Attempts to register a new shared borrow; returns `None` if a write
+ // borrow is active or the reader count would overflow.
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRef<'b>> {
+ // `wrapping_add` so the isize::MAX -> isize::MIN overflow case is
+ // caught by the `is_reading` check below instead of being UB/panic.
+ let b = borrow.get().wrapping_add(1);
+ if !is_reading(b) {
+ // Incrementing borrow can result in a non-reading value (<= 0) in these cases:
+ // 1. It was < 0, i.e. there are writing borrows, so we can't allow a read borrow
+ // due to Rust's reference aliasing rules
+ // 2. It was isize::MAX (the max amount of reading borrows) and it overflowed
+ // into isize::MIN (the max amount of writing borrows) so we can't allow
+ // an additional read borrow because isize can't represent so many read borrows
+ // (this can only happen if you mem::forget more than a small constant amount of
+ // `Ref`s, which is not good practice)
+ None
+ } else {
+ // Incrementing borrow can result in a reading value (> 0) in these cases:
+ // 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read borrow
+ // 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize
+ // is large enough to represent having one more read borrow
+ borrow.set(b);
+ Some(BorrowRef { borrow })
+ }
+ }
+}
+
+impl Drop for BorrowRef<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ // Releasing a shared borrow: the flag must currently be positive.
+ let borrow = self.borrow.get();
+ debug_assert!(is_reading(borrow));
+ self.borrow.set(borrow - 1);
+ }
+}
+
+impl Clone for BorrowRef<'_> {
+ #[inline]
+ fn clone(&self) -> Self {
+ // Since this Ref exists, we know the borrow flag
+ // is a reading borrow.
+ let borrow = self.borrow.get();
+ debug_assert!(is_reading(borrow));
+ // Prevent the borrow counter from overflowing into
+ // a writing borrow.
+ assert!(borrow != isize::MAX);
+ self.borrow.set(borrow + 1);
+ BorrowRef { borrow: self.borrow }
+ }
+}
+
+/// A wrapper type for an immutably borrowed value from a `RefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Ref<'b, T: ?Sized + 'b> {
+ // Direct reference to (a component of) the borrowed value.
+ value: &'b T,
+ // Guard that decrements the cell's shared-borrow count when dropped.
+ borrow: BorrowRef<'b>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Ref<'_, T> {
+ type Target = T;
+
+ // A `Ref` transparently dereferences to the borrowed value.
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'b, T: ?Sized> Ref<'b, T> {
+ /// Copies a `Ref`.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::clone(...)`. A `Clone` implementation or a method would interfere
+ /// with the widespread use of `r.borrow().clone()` to clone the contents of
+ /// a `RefCell`.
+ #[stable(feature = "cell_extras", since = "1.15.0")]
+ #[inline]
+ pub fn clone(orig: &Ref<'b, T>) -> Ref<'b, T> {
+ // `BorrowRef::clone` bumps the shared-borrow count for the new guard.
+ Ref { value: orig.value, borrow: orig.borrow.clone() }
+ }
+
+ /// Makes a new `Ref` for a component of the borrowed data.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as `Ref::map(...)`.
+ /// A method would interfere with methods of the same name on the contents
+ /// of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, Ref};
+ ///
+ /// let c = RefCell::new((5, 'b'));
+ /// let b1: Ref<(u32, char)> = c.borrow();
+ /// let b2: Ref<u32> = Ref::map(b1, |t| &t.0);
+ /// assert_eq!(*b2, 5)
+ /// ```
+ #[stable(feature = "cell_map", since = "1.8.0")]
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: Ref<'b, T>, f: F) -> Ref<'b, U>
+ where
+ F: FnOnce(&T) -> &U,
+ {
+ // Reuses `orig`'s guard, so the borrow count is unchanged.
+ Ref { value: f(orig.value), borrow: orig.borrow }
+ }
+
+ /// Splits a `Ref` into multiple `Ref`s for different components of the
+ /// borrowed data.
+ ///
+ /// The `RefCell` is already immutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::map_split(...)`. A method would interfere with methods of the same
+ /// name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{Ref, RefCell};
+ ///
+ /// let cell = RefCell::new([1, 2, 3, 4]);
+ /// let borrow = cell.borrow();
+ /// let (begin, end) = Ref::map_split(borrow, |slice| slice.split_at(2));
+ /// assert_eq!(*begin, [1, 2]);
+ /// assert_eq!(*end, [3, 4]);
+ /// ```
+ #[stable(feature = "refcell_map_split", since = "1.35.0")]
+ #[inline]
+ pub fn map_split<U: ?Sized, V: ?Sized, F>(orig: Ref<'b, T>, f: F) -> (Ref<'b, U>, Ref<'b, V>)
+ where
+ F: FnOnce(&T) -> (&U, &V),
+ {
+ let (a, b) = f(orig.value);
+ // Each returned `Ref` needs its own guard, so clone the guard once:
+ // the shared-borrow count goes up by one.
+ let borrow = orig.borrow.clone();
+ (Ref { value: a, borrow }, Ref { value: b, borrow: orig.borrow })
+ }
+
+ /// Convert into a reference to the underlying data.
+ ///
+ /// The underlying `RefCell` can never be mutably borrowed from again and will always appear
+ /// already immutably borrowed. It is not a good idea to leak more than a constant number of
+ /// references. The `RefCell` can be immutably borrowed again if only a smaller number of leaks
+ /// have occurred in total.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `Ref::leak(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::{RefCell, Ref};
+ /// let cell = RefCell::new(0);
+ ///
+ /// let value = Ref::leak(cell.borrow());
+ /// assert_eq!(*value, 0);
+ ///
+ /// assert!(cell.try_borrow().is_ok());
+ /// assert!(cell.try_borrow_mut().is_err());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn leak(orig: Ref<'b, T>) -> &'b T {
+ // By forgetting this Ref we ensure that the borrow counter in the RefCell can't go back to
+ // UNUSED within the lifetime `'b`. Resetting the reference tracking state would require a
+ // unique reference to the borrowed RefCell. No further mutable references can be created
+ // from the original cell.
+ mem::forget(orig.borrow);
+ orig.value
+ }
+}
+
+// Allows e.g. `Ref<'_, [T; N]>` to coerce to `Ref<'_, [T]>`.
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Ref<'b, U>> for Ref<'b, T> {}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Ref<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward formatting straight to the borrowed value.
+ self.value.fmt(f)
+ }
+}
+
+impl<'b, T: ?Sized> RefMut<'b, T> {
+ /// Makes a new `RefMut` for a component of the borrowed data, e.g., an enum
+ /// variant.
+ ///
+ /// The `RefCell` is already mutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::map(...)`. A method would interfere with methods of the same
+ /// name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, RefMut};
+ ///
+ /// let c = RefCell::new((5, 'b'));
+ /// {
+ /// let b1: RefMut<(u32, char)> = c.borrow_mut();
+ /// let mut b2: RefMut<u32> = RefMut::map(b1, |t| &mut t.0);
+ /// assert_eq!(*b2, 5);
+ /// *b2 = 42;
+ /// }
+ /// assert_eq!(*c.borrow(), (42, 'b'));
+ /// ```
+ #[stable(feature = "cell_map", since = "1.8.0")]
+ #[inline]
+ pub fn map<U: ?Sized, F>(orig: RefMut<'b, T>, f: F) -> RefMut<'b, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ // FIXME(nll-rfc#40): fix borrow-check
+ // Destructure first so the guard and the projected reference can be
+ // moved into the new `RefMut` independently; the count is unchanged.
+ let RefMut { value, borrow } = orig;
+ RefMut { value: f(value), borrow }
+ }
+
+ /// Splits a `RefMut` into multiple `RefMut`s for different components of the
+ /// borrowed data.
+ ///
+ /// The underlying `RefCell` will remain mutably borrowed until both
+ /// returned `RefMut`s go out of scope.
+ ///
+ /// The `RefCell` is already mutably borrowed, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::map_split(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::cell::{RefCell, RefMut};
+ ///
+ /// let cell = RefCell::new([1, 2, 3, 4]);
+ /// let borrow = cell.borrow_mut();
+ /// let (mut begin, mut end) = RefMut::map_split(borrow, |slice| slice.split_at_mut(2));
+ /// assert_eq!(*begin, [1, 2]);
+ /// assert_eq!(*end, [3, 4]);
+ /// begin.copy_from_slice(&[4, 3]);
+ /// end.copy_from_slice(&[2, 1]);
+ /// ```
+ #[stable(feature = "refcell_map_split", since = "1.35.0")]
+ #[inline]
+ pub fn map_split<U: ?Sized, V: ?Sized, F>(
+ orig: RefMut<'b, T>,
+ f: F,
+ ) -> (RefMut<'b, U>, RefMut<'b, V>)
+ where
+ F: FnOnce(&mut T) -> (&mut U, &mut V),
+ {
+ let (a, b) = f(orig.value);
+ // `BorrowRefMut::clone` is only sound because `f` returned two
+ // references that must not overlap; it records one more writer.
+ let borrow = orig.borrow.clone();
+ (RefMut { value: a, borrow }, RefMut { value: b, borrow: orig.borrow })
+ }
+
+ /// Convert into a mutable reference to the underlying data.
+ ///
+ /// The underlying `RefCell` can not be borrowed from again and will always appear already
+ /// mutably borrowed, making the returned reference the only to the interior.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RefMut::leak(...)`. A method would interfere with methods of the
+ /// same name on the contents of a `RefCell` used through `Deref`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cell_leak)]
+ /// use std::cell::{RefCell, RefMut};
+ /// let cell = RefCell::new(0);
+ ///
+ /// let value = RefMut::leak(cell.borrow_mut());
+ /// assert_eq!(*value, 0);
+ /// *value = 1;
+ ///
+ /// assert!(cell.try_borrow_mut().is_err());
+ /// ```
+ #[unstable(feature = "cell_leak", issue = "69099")]
+ pub fn leak(orig: RefMut<'b, T>) -> &'b mut T {
+ // By forgetting this BorrowRefMut we ensure that the borrow counter in the RefCell can't
+ // go back to UNUSED within the lifetime `'b`. Resetting the reference tracking state would
+ // require a unique reference to the borrowed RefCell. No further references can be created
+ // from the original cell within that lifetime, making the current borrow the only
+ // reference for the remaining lifetime.
+ mem::forget(orig.borrow);
+ orig.value
+ }
+}
+
+// RAII-style guard for a mutable borrow: construction decrements the cell's
+// borrow flag below `UNUSED` and `Drop` restores it.
+struct BorrowRefMut<'b> {
+ borrow: &'b Cell<BorrowFlag>,
+}
+
+impl Drop for BorrowRefMut<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ // Releasing a mutable borrow: the flag must currently be negative.
+ let borrow = self.borrow.get();
+ debug_assert!(is_writing(borrow));
+ self.borrow.set(borrow + 1);
+ }
+}
+
+impl<'b> BorrowRefMut<'b> {
+ // Attempts to register the first mutable borrow; returns `None` if any
+ // borrow (shared or mutable) is already active.
+ #[inline]
+ fn new(borrow: &'b Cell<BorrowFlag>) -> Option<BorrowRefMut<'b>> {
+ // NOTE: Unlike BorrowRefMut::clone, new is called to create the initial
+ // mutable reference, and so there must currently be no existing
+ // references. Thus, while clone increments the mutable refcount, here
+ // we explicitly only allow going from UNUSED to UNUSED - 1.
+ match borrow.get() {
+ UNUSED => {
+ borrow.set(UNUSED - 1);
+ Some(BorrowRefMut { borrow })
+ }
+ _ => None,
+ }
+ }
+
+ // Clones a `BorrowRefMut`.
+ //
+ // This is only valid if each `BorrowRefMut` is used to track a mutable
+ // reference to a distinct, nonoverlapping range of the original object.
+ // This isn't in a Clone impl so that code doesn't call this implicitly.
+ #[inline]
+ fn clone(&self) -> BorrowRefMut<'b> {
+ let borrow = self.borrow.get();
+ debug_assert!(is_writing(borrow));
+ // Prevent the borrow counter from underflowing.
+ assert!(borrow != isize::MIN);
+ self.borrow.set(borrow - 1);
+ BorrowRefMut { borrow: self.borrow }
+ }
+}
+
+/// A wrapper type for a mutably borrowed value from a `RefCell<T>`.
+///
+/// See the [module-level documentation](self) for more.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RefMut<'b, T: ?Sized + 'b> {
+ // Exclusive reference to (a component of) the borrowed value.
+ value: &'b mut T,
+ // Guard that restores the cell's borrow flag when dropped.
+ borrow: BorrowRefMut<'b>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RefMut<'_, T> {
+ type Target = T;
+
+ // A `RefMut` transparently dereferences to the borrowed value.
+ #[inline]
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for RefMut<'_, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ self.value
+ }
+}
+
+// Allows e.g. `RefMut<'_, [T; N]>` to coerce to `RefMut<'_, [T]>`.
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'b, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<RefMut<'b, U>> for RefMut<'b, T> {}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RefMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward formatting straight to the borrowed value.
+ self.value.fmt(f)
+ }
+}
+
+/// The core primitive for interior mutability in Rust.
+///
+/// `UnsafeCell<T>` is a type that wraps some `T` and indicates unsafe interior operations on the
+/// wrapped type. Types with an `UnsafeCell<T>` field are considered to have an 'unsafe interior'.
+/// The `UnsafeCell<T>` type is the only legal way to obtain aliasable data that is considered
+/// mutable. In general, transmuting an `&T` type into an `&mut T` is considered undefined behavior.
+///
+/// If you have a reference `&SomeStruct`, then normally in Rust all fields of `SomeStruct` are
+/// immutable. The compiler makes optimizations based on the knowledge that `&T` is not mutably
+/// aliased or mutated, and that `&mut T` is unique. `UnsafeCell<T>` is the only core language
+/// feature to work around the restriction that `&T` may not be mutated. All other types that
+/// allow internal mutability, such as `Cell<T>` and `RefCell<T>`, use `UnsafeCell` to wrap their
+/// internal data. There is *no* legal way to obtain aliasing `&mut`, not even with `UnsafeCell<T>`.
+///
+/// The `UnsafeCell` API itself is technically very simple: [`.get()`] gives you a raw pointer
+/// `*mut T` to its contents. It is up to _you_ as the abstraction designer to use that raw pointer
+/// correctly.
+///
+/// [`.get()`]: `UnsafeCell::get`
+///
+/// The precise Rust aliasing rules are somewhat in flux, but the main points are not contentious:
+///
+/// - If you create a safe reference with lifetime `'a` (either a `&T` or `&mut T`
+/// reference) that is accessible by safe code (for example, because you returned it),
+/// then you must not access the data in any way that contradicts that reference for the
+/// remainder of `'a`. For example, this means that if you take the `*mut T` from an
+/// `UnsafeCell<T>` and cast it to an `&T`, then the data in `T` must remain immutable
+/// (modulo any `UnsafeCell` data found within `T`, of course) until that reference's
+/// lifetime expires. Similarly, if you create a `&mut T` reference that is released to
+/// safe code, then you must not access the data within the `UnsafeCell` until that
+/// reference expires.
+///
+/// - At all times, you must avoid data races. If multiple threads have access to
+/// the same `UnsafeCell`, then any writes must have a proper happens-before relation to all other
+/// accesses (or use atomics).
+///
+/// To assist with proper design, the following scenarios are explicitly declared legal
+/// for single-threaded code:
+///
+/// 1. A `&T` reference can be released to safe code and there it can co-exist with other `&T`
+/// references, but not with a `&mut T`
+///
+/// 2. A `&mut T` reference may be released to safe code provided neither other `&mut T` nor `&T`
+/// co-exist with it. A `&mut T` must always be unique.
+///
+/// Note that whilst mutating the contents of an `&UnsafeCell<T>` (even while other
+/// `&UnsafeCell<T>` references alias the cell) is
+/// ok (provided you enforce the above invariants some other way), it is still undefined behavior
+/// to have multiple `&mut UnsafeCell<T>` aliases. That is, `UnsafeCell` is a wrapper
+/// designed to have a special interaction with _shared_ accesses (_i.e._, through an
+/// `&UnsafeCell<_>` reference); there is no magic whatsoever when dealing with _exclusive_
+/// accesses (_e.g._, through an `&mut UnsafeCell<_>`): neither the cell nor the wrapped value
+/// may be aliased for the duration of that `&mut` borrow.
+/// This is showcased by the [`.get_mut()`] accessor, which is a non-`unsafe` getter that yields
+/// a `&mut T`.
+///
+/// [`.get_mut()`]: `UnsafeCell::get_mut`
+///
+/// # Examples
+///
+/// Here is an example showcasing how to soundly mutate the contents of an `UnsafeCell<_>` despite
+/// there being multiple references aliasing the cell:
+///
+/// ```
+/// use std::cell::UnsafeCell;
+///
+/// let x: UnsafeCell<i32> = 42.into();
+/// // Get multiple / concurrent / shared references to the same `x`.
+/// let (p1, p2): (&UnsafeCell<i32>, &UnsafeCell<i32>) = (&x, &x);
+///
+/// unsafe {
+/// // SAFETY: within this scope there are no other references to `x`'s contents,
+/// // so ours is effectively unique.
+/// let p1_exclusive: &mut i32 = &mut *p1.get(); // -- borrow --+
+/// *p1_exclusive += 27; // |
+/// } // <---------- cannot go beyond this point -------------------+
+///
+/// unsafe {
+/// // SAFETY: within this scope nobody expects to have exclusive access to `x`'s contents,
+/// // so we can have multiple shared accesses concurrently.
+/// let p2_shared: &i32 = &*p2.get();
+/// assert_eq!(*p2_shared, 42 + 27);
+/// let p1_shared: &i32 = &*p1.get();
+/// assert_eq!(*p1_shared, *p2_shared);
+/// }
+/// ```
+///
+/// The following example showcases the fact that exclusive access to an `UnsafeCell<T>`
+/// implies exclusive access to its `T`:
+///
+/// ```rust
+/// #![feature(unsafe_cell_get_mut)]
+/// #![forbid(unsafe_code)] // with exclusive accesses,
+/// // `UnsafeCell` is a transparent no-op wrapper,
+/// // so no need for `unsafe` here.
+/// use std::cell::UnsafeCell;
+///
+/// let mut x: UnsafeCell<i32> = 42.into();
+///
+/// // Get a compile-time-checked unique reference to `x`.
+/// let p_unique: &mut UnsafeCell<i32> = &mut x;
+/// // With an exclusive reference, we can mutate the contents for free.
+/// *p_unique.get_mut() = 0;
+/// // Or, equivalently:
+/// x = UnsafeCell::new(0);
+///
+/// // When we own the value, we can extract the contents for free.
+/// let contents: i32 = x.into_inner();
+/// assert_eq!(contents, 0);
+/// ```
+#[lang = "unsafe_cell"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[repr(transparent)]
+#[repr(no_niche)] // rust-lang/rust#68303.
+pub struct UnsafeCell<T: ?Sized> {
+    // The wrapped value. Shared access to it is only handed out as a raw
+    // pointer (via `get`/`raw_get`); `&mut` access goes through `get_mut`.
+    value: T,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// `UnsafeCell` allows mutation through shared references, so sharing one
+// across threads without external synchronization would permit data races.
+impl<T: ?Sized> !Sync for UnsafeCell<T> {}
+
+impl<T> UnsafeCell<T> {
+    /// Constructs a new instance of `UnsafeCell` which will wrap the specified
+    /// value.
+    ///
+    /// All access to the inner value through methods is `unsafe`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::UnsafeCell;
+    ///
+    /// let uc = UnsafeCell::new(5);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_stable(feature = "const_unsafe_cell_new", since = "1.32.0")]
+    #[inline]
+    pub const fn new(value: T) -> UnsafeCell<T> {
+        UnsafeCell { value }
+    }
+
+    /// Unwraps the value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::UnsafeCell;
+    ///
+    /// let uc = UnsafeCell::new(5);
+    ///
+    /// let five = uc.into_inner();
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+    pub const fn into_inner(self) -> T {
+        // A plain field move: `self` is consumed by value, so there can be no
+        // aliases and no `unsafe` is required.
+        self.value
+    }
+}
+
+impl<T: ?Sized> UnsafeCell<T> {
+    /// Gets a mutable pointer to the wrapped value.
+    ///
+    /// This can be cast to a pointer of any kind.
+    /// Ensure that the access is unique (no active references, mutable or not)
+    /// when casting to `&mut T`, and ensure that there are no mutations
+    /// or mutable aliases going on when casting to `&T`
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cell::UnsafeCell;
+    ///
+    /// let uc = UnsafeCell::new(5);
+    ///
+    /// let five = uc.get();
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_stable(feature = "const_unsafecell_get", since = "1.32.0")]
+    pub const fn get(&self) -> *mut T {
+        // We can just cast the pointer from `UnsafeCell<T>` to `T` because of
+        // #[repr(transparent)]. This exploits libstd's special status, there is
+        // no guarantee for user code that this will work in future versions of the compiler!
+        self as *const UnsafeCell<T> as *const T as *mut T
+    }
+
+    /// Returns a mutable reference to the underlying data.
+    ///
+    /// This call borrows the `UnsafeCell` mutably (at compile-time) which
+    /// guarantees that we possess the only reference.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(unsafe_cell_get_mut)]
+    /// use std::cell::UnsafeCell;
+    ///
+    /// let mut c = UnsafeCell::new(5);
+    /// *c.get_mut() += 1;
+    ///
+    /// assert_eq!(*c.get_mut(), 6);
+    /// ```
+    #[inline]
+    #[unstable(feature = "unsafe_cell_get_mut", issue = "76943")]
+    pub fn get_mut(&mut self) -> &mut T {
+        // `&mut self` already proves unique access at compile time, so handing
+        // out `&mut` to the wrapped value needs no `unsafe`.
+        &mut self.value
+    }
+
+    /// Gets a mutable pointer to the wrapped value.
+    /// The difference to [`get`] is that this function accepts a raw pointer,
+    /// which is useful to avoid the creation of temporary references.
+    ///
+    /// The result can be cast to a pointer of any kind.
+    /// Ensure that the access is unique (no active references, mutable or not)
+    /// when casting to `&mut T`, and ensure that there are no mutations
+    /// or mutable aliases going on when casting to `&T`.
+    ///
+    /// [`get`]: #method.get
+    ///
+    /// # Examples
+    ///
+    /// Gradual initialization of an `UnsafeCell` requires `raw_get`, as
+    /// calling `get` would require creating a reference to uninitialized data:
+    ///
+    /// ```
+    /// #![feature(unsafe_cell_raw_get)]
+    /// use std::cell::UnsafeCell;
+    /// use std::mem::MaybeUninit;
+    ///
+    /// let m = MaybeUninit::<UnsafeCell<i32>>::uninit();
+    /// unsafe { UnsafeCell::raw_get(m.as_ptr()).write(5); }
+    /// let uc = unsafe { m.assume_init() };
+    ///
+    /// assert_eq!(uc.into_inner(), 5);
+    /// ```
+    #[inline]
+    #[unstable(feature = "unsafe_cell_raw_get", issue = "66358")]
+    pub const fn raw_get(this: *const Self) -> *mut T {
+        // We can just cast the pointer from `UnsafeCell<T>` to `T` because of
+        // #[repr(transparent)]. This exploits libstd's special status, there is
+        // no guarantee for user code that this will work in future versions of the compiler!
+        this as *const T as *mut T
+    }
+}
+
+#[stable(feature = "unsafe_cell_default", since = "1.10.0")]
+impl<T: Default> Default for UnsafeCell<T> {
+    /// Creates an `UnsafeCell`, with the `Default` value for T.
+    fn default() -> Self {
+        UnsafeCell::new(T::default())
+    }
+}
+
+#[stable(feature = "cell_from", since = "1.12.0")]
+impl<T> From<T> for UnsafeCell<T> {
+    /// Wraps the given value in a new `UnsafeCell<T>`.
+    fn from(t: T) -> Self {
+        Self::new(t)
+    }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+// Propagates unsize coercions through the wrapper, e.g. allowing
+// `UnsafeCell<&i32>` to coerce to `UnsafeCell<&dyn Send>`.
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<UnsafeCell<U>> for UnsafeCell<T> {}
+
+#[allow(unused)]
+// Compile-time check that the cell types propagate unsize coercions
+// (`&i32` -> `&dyn Send`); this function is never called at runtime.
+fn assert_coerce_unsized(a: UnsafeCell<&i32>, b: Cell<&i32>, c: RefCell<&i32>) {
+    let _: UnsafeCell<&dyn Send> = a;
+    let _: Cell<&dyn Send> = b;
+    let _: RefCell<&dyn Send> = c;
+}
--- /dev/null
+//! Character conversions.
+
+use crate::convert::TryFrom;
+use crate::fmt;
+use crate::mem::transmute;
+use crate::str::FromStr;
+
+use super::MAX;
+
+/// Converts a `u32` to a `char`.
+///
+/// Note that all [`char`]s are valid [`u32`]s, and can be cast to one with
+/// `as`:
+///
+/// ```
+/// let c = '💯';
+/// let i = c as u32;
+///
+/// assert_eq!(128175, i);
+/// ```
+///
+/// However, the reverse is not true: not all valid [`u32`]s are valid
+/// [`char`]s. `from_u32()` will return `None` if the input is not a valid value
+/// for a [`char`].
+///
+/// For an unsafe version of this function which ignores these checks, see
+/// [`from_u32_unchecked`].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::char;
+///
+/// let c = char::from_u32(0x2764);
+///
+/// assert_eq!(Some('❤'), c);
+/// ```
+///
+/// Returning `None` when the input is not a valid [`char`]:
+///
+/// ```
+/// use std::char;
+///
+/// let c = char::from_u32(0x110000);
+///
+/// assert_eq!(None, c);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_u32(i: u32) -> Option<char> {
+    // Defer to the `TryFrom<u32> for char` impl, mapping failure to `None`.
+    match char::try_from(i) {
+        Ok(c) => Some(c),
+        Err(_) => None,
+    }
+}
+
+/// Converts a `u32` to a `char`, ignoring validity.
+///
+/// Note that all [`char`]s are valid [`u32`]s, and can be cast to one with
+/// `as`:
+///
+/// ```
+/// let c = '💯';
+/// let i = c as u32;
+///
+/// assert_eq!(128175, i);
+/// ```
+///
+/// However, the reverse is not true: not all valid [`u32`]s are valid
+/// [`char`]s. `from_u32_unchecked()` will ignore this, and blindly cast to
+/// [`char`], possibly creating an invalid one.
+///
+/// # Safety
+///
+/// This function is unsafe, as it may construct invalid `char` values.
+///
+/// For a safe version of this function, see the [`from_u32`] function.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::char;
+///
+/// let c = unsafe { char::from_u32_unchecked(0x2764) };
+///
+/// assert_eq!('❤', c);
+/// ```
+#[inline]
+#[stable(feature = "char_from_unchecked", since = "1.5.0")]
+pub unsafe fn from_u32_unchecked(i: u32) -> char {
+    // SAFETY: the caller must guarantee that `i` is a valid char value.
+    // In debug builds the checked conversion is used so invalid inputs panic
+    // instead of producing an invalid `char`; release builds transmute directly.
+    if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { unsafe { transmute(i) } }
+}
+
+#[stable(feature = "char_convert", since = "1.13.0")]
+impl From<char> for u32 {
+    /// Converts a [`char`] into a [`u32`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::mem;
+    ///
+    /// let c = 'c';
+    /// let u = u32::from(c);
+    /// assert!(4 == mem::size_of_val(&u))
+    /// ```
+    #[inline]
+    fn from(c: char) -> Self {
+        // Widening cast: every `char` is a valid Unicode scalar value, which
+        // always fits in a `u32`, so this is lossless.
+        c as u32
+    }
+}
+
+/// Maps a byte in 0x00..=0xFF to a `char` whose code point has the same value, in U+0000..=U+00FF.
+///
+/// Unicode is designed such that this effectively decodes bytes
+/// with the character encoding that IANA calls ISO-8859-1.
+/// This encoding is compatible with ASCII.
+///
+/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hyphen),
+/// which leaves some "blanks", byte values that are not assigned to any character.
+/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes.
+///
+/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252,
+/// which is a superset ISO/IEC 8859-1 that assigns some (not all!) blanks
+/// to punctuation and various Latin characters.
+///
+/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/)
+/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases
+/// for a superset of Windows-1252 that fills the remaining blanks with corresponding
+/// C0 and C1 control codes.
+#[stable(feature = "char_convert", since = "1.13.0")]
+impl From<u8> for char {
+    /// Converts a [`u8`] into a [`char`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::mem;
+    ///
+    /// let u = 32 as u8;
+    /// let c = char::from(u);
+    /// assert!(4 == mem::size_of_val(&c))
+    /// ```
+    #[inline]
+    fn from(i: u8) -> Self {
+        // Every value in 0x00..=0xFF is a valid code point, so the cast is
+        // always valid (no surrogates or out-of-range values are reachable).
+        i as char
+    }
+}
+
+/// An error which can be returned when parsing a char.
+#[stable(feature = "char_from_str", since = "1.20.0")]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct ParseCharError {
+    // Why parsing failed; not exposed publicly, only via the Display message.
+    kind: CharErrorKind,
+}
+
+impl ParseCharError {
+    #[unstable(
+        feature = "char_error_internals",
+        reason = "this method should not be available publicly",
+        issue = "none"
+    )]
+    #[doc(hidden)]
+    // Internal helper: maps the error kind to its human-readable message.
+    // Used by the `Display` impl below.
+    pub fn __description(&self) -> &str {
+        match self.kind {
+            CharErrorKind::EmptyString => "cannot parse char from empty string",
+            CharErrorKind::TooManyChars => "too many characters in string",
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum CharErrorKind {
+    // The input string contained no characters at all.
+    EmptyString,
+    // The input string contained more than one character.
+    TooManyChars,
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl fmt::Display for ParseCharError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Forward the kind-specific message through `str`'s Display impl so
+        // formatter flags (width, padding) are respected.
+        self.__description().fmt(f)
+    }
+}
+
+#[stable(feature = "char_from_str", since = "1.20.0")]
+impl FromStr for char {
+    type Err = ParseCharError;
+
+    /// Parses a string containing exactly one character.
+    #[inline]
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut iter = s.chars();
+        // The string must yield a first character ...
+        let first = match iter.next() {
+            Some(c) => c,
+            None => return Err(ParseCharError { kind: CharErrorKind::EmptyString }),
+        };
+        // ... and nothing after it.
+        if iter.next().is_none() {
+            Ok(first)
+        } else {
+            Err(ParseCharError { kind: CharErrorKind::TooManyChars })
+        }
+    }
+}
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl TryFrom<u32> for char {
+    type Error = CharTryFromError;
+
+    #[inline]
+    fn try_from(i: u32) -> Result<Self, Self::Error> {
+        // Reject values above the maximum code point and the surrogate range.
+        let is_surrogate = (0xD800..=0xDFFF).contains(&i);
+        if is_surrogate || i > MAX as u32 {
+            Err(CharTryFromError(()))
+        } else {
+            // SAFETY: checked that it's a legal unicode value
+            Ok(unsafe { transmute(i) })
+        }
+    }
+}
+
+/// The error type returned when a conversion from u32 to char fails.
+#[stable(feature = "try_from", since = "1.34.0")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+// The private unit field keeps the type constructible only within this module.
+pub struct CharTryFromError(());
+
+#[stable(feature = "try_from", since = "1.34.0")]
+impl fmt::Display for CharTryFromError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // A single fixed message: the error carries no payload to report.
+        "converted integer out of range for `char`".fmt(f)
+    }
+}
+
+/// Converts a digit in the given radix to a `char`.
+///
+/// A 'radix' here is sometimes also called a 'base'. A radix of two
+/// indicates a binary number, a radix of ten, decimal, and a radix of
+/// sixteen, hexadecimal, to give some common values. Arbitrary
+/// radices are supported.
+///
+/// `from_digit()` will return `None` if the input is not a digit in
+/// the given radix.
+///
+/// # Panics
+///
+/// Panics if given a radix larger than 36.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::char;
+///
+/// let c = char::from_digit(4, 10);
+///
+/// assert_eq!(Some('4'), c);
+///
+/// // Decimal 11 is a single digit in base 16
+/// let c = char::from_digit(11, 16);
+///
+/// assert_eq!(Some('b'), c);
+/// ```
+///
+/// Returning `None` when the input is not a digit:
+///
+/// ```
+/// use std::char;
+///
+/// let c = char::from_digit(20, 10);
+///
+/// assert_eq!(None, c);
+/// ```
+///
+/// Passing a large radix, causing a panic:
+///
+/// ```should_panic
+/// use std::char;
+///
+/// // this panics
+/// let c = char::from_digit(1, 37);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_digit(num: u32, radix: u32) -> Option<char> {
+    if radix > 36 {
+        panic!("from_digit: radix is too high (maximum 36)");
+    }
+    // Any value at or above the radix is not a digit in that radix.
+    if num >= radix {
+        return None;
+    }
+    // `num < radix <= 36`, so it fits in a `u8` and maps to '0'-'9' or 'a'-'z'.
+    let digit = num as u8;
+    let ascii = if digit < 10 { b'0' + digit } else { b'a' + digit - 10 };
+    Some(ascii as char)
+}
--- /dev/null
+//! UTF-8 and UTF-16 decoding iterators
+
+use crate::fmt;
+
+use super::from_u32_unchecked;
+
+/// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[derive(Clone, Debug)]
+pub struct DecodeUtf16<I>
+where
+    I: Iterator<Item = u16>,
+{
+    // The underlying source of UTF-16 code units.
+    iter: I,
+    // A code unit read ahead while looking for a trailing surrogate; it is
+    // re-decoded on the next call to `next` instead of being lost.
+    buf: Option<u16>,
+}
+
+/// An error that can be returned when decoding UTF-16 code points.
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct DecodeUtf16Error {
+    // The unpaired surrogate code unit that caused the error.
+    code: u16,
+}
+
+/// Creates an iterator over the UTF-16 encoded code points in `iter`,
+/// returning unpaired surrogates as `Err`s.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::char::decode_utf16;
+///
+/// // 𝄞mus<invalid>ic<invalid>
+/// let v = [
+///     0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+/// ];
+///
+/// assert_eq!(
+///     decode_utf16(v.iter().cloned())
+///         .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+///         .collect::<Vec<_>>(),
+///     vec![
+///         Ok('𝄞'),
+///         Ok('m'), Ok('u'), Ok('s'),
+///         Err(0xDD1E),
+///         Ok('i'), Ok('c'),
+///         Err(0xD834)
+///     ]
+/// );
+/// ```
+///
+/// A lossy decoder can be obtained by replacing `Err` results with the replacement character:
+///
+/// ```
+/// use std::char::{decode_utf16, REPLACEMENT_CHARACTER};
+///
+/// // 𝄞mus<invalid>ic<invalid>
+/// let v = [
+///     0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+/// ];
+///
+/// assert_eq!(
+///     decode_utf16(v.iter().cloned())
+///         .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER))
+///         .collect::<String>(),
+///     "𝄞mus�ic�"
+/// );
+/// ```
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+#[inline]
+pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
+    // Start with an empty lookahead buffer; `next` fills it when a read-ahead
+    // code unit turns out not to be a trailing surrogate.
+    DecodeUtf16 { iter: iter.into_iter(), buf: None }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl<I: Iterator<Item = u16>> Iterator for DecodeUtf16<I> {
+    type Item = Result<char, DecodeUtf16Error>;
+
+    fn next(&mut self) -> Option<Result<char, DecodeUtf16Error>> {
+        // Prefer a code unit stashed by a previous unpaired-surrogate read.
+        let u = match self.buf.take() {
+            Some(buf) => buf,
+            None => self.iter.next()?,
+        };
+
+        if u < 0xD800 || 0xDFFF < u {
+            // SAFETY: not a surrogate
+            Some(Ok(unsafe { from_u32_unchecked(u as u32) }))
+        } else if u >= 0xDC00 {
+            // a trailing surrogate
+            Some(Err(DecodeUtf16Error { code: u }))
+        } else {
+            let u2 = match self.iter.next() {
+                Some(u2) => u2,
+                // eof
+                None => return Some(Err(DecodeUtf16Error { code: u })),
+            };
+            if u2 < 0xDC00 || u2 > 0xDFFF {
+                // not a trailing surrogate so we're not a valid
+                // surrogate pair, so rewind to redecode u2 next time.
+                self.buf = Some(u2);
+                return Some(Err(DecodeUtf16Error { code: u }));
+            }
+
+            // all ok, so lets decode it.
+            let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
+            // SAFETY: we checked that it's a legal unicode value
+            Some(Ok(unsafe { from_u32_unchecked(c) }))
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (low, high) = self.iter.size_hint();
+        // A code unit stashed in `buf` can yield one item beyond what the
+        // underlying iterator's bounds account for; the previous upper bound
+        // of plain `high` undercounted in that case, violating the
+        // `size_hint` contract.
+        let buffered = self.buf.is_some() as usize;
+        // we could be entirely valid surrogates (2 elements per
+        // char), or entirely non-surrogates (1 element per char);
+        // `checked_add` turns a (theoretical) overflow into "unknown"
+        // rather than a wrong bound.
+        (low / 2, high.and_then(|h| h.checked_add(buffered)))
+    }
+}
+
+impl DecodeUtf16Error {
+    /// Returns the unpaired surrogate which caused this error.
+    #[stable(feature = "decode_utf16", since = "1.9.0")]
+    pub fn unpaired_surrogate(&self) -> u16 {
+        // Simple accessor for the stored offending code unit.
+        self.code
+    }
+}
+
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+impl fmt::Display for DecodeUtf16Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Report the offending code unit in lowercase hex (e.g. `d834`).
+        write!(f, "unpaired surrogate found: {:x}", self.code)
+    }
+}
--- /dev/null
+//! impl char {}
+
+use crate::slice;
+use crate::str::from_utf8_unchecked_mut;
+use crate::unicode::printable::is_printable;
+use crate::unicode::{self, conversions};
+
+use super::*;
+
+#[lang = "char"]
+impl char {
+    /// The highest valid code point a `char` can have.
+    ///
+    /// A `char` is a [Unicode Scalar Value], which means that it is a [Code
+    /// Point], but only ones within a certain range. `MAX` is the highest valid
+    /// code point that's a valid [Unicode Scalar Value].
+    ///
+    /// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value
+    /// [Code Point]: http://www.unicode.org/glossary/#code_point
+    #[unstable(feature = "assoc_char_consts", reason = "recently added", issue = "71763")]
+    pub const MAX: char = '\u{10ffff}'; // == 0x10FFFF, the last code point of plane 16.
+
+    /// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a
+    /// decoding error.
+    ///
+    /// It can occur, for example, when giving ill-formed UTF-8 bytes to
+    /// [`String::from_utf8_lossy`](string/struct.String.html#method.from_utf8_lossy).
+    #[unstable(feature = "assoc_char_consts", reason = "recently added", issue = "71763")]
+    pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}';
+
+    /// The version of [Unicode](http://www.unicode.org/) that the Unicode parts of
+    /// `char` and `str` methods are based on.
+    ///
+    /// New versions of Unicode are released regularly and subsequently all methods
+    /// in the standard library depending on Unicode are updated. Therefore the
+    /// behavior of some `char` and `str` methods and the value of this constant
+    /// changes over time. This is *not* considered to be a breaking change.
+    ///
+    /// The version numbering scheme is explained in
+    /// [Unicode 11.0 or later, Section 3.1 Versions of the Unicode Standard](https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf#page=4).
+    #[unstable(feature = "assoc_char_consts", reason = "recently added", issue = "71763")]
+    // Re-exported from the generated Unicode tables in `crate::unicode`.
+    pub const UNICODE_VERSION: (u8, u8, u8) = crate::unicode::UNICODE_VERSION;
+
+    /// Creates an iterator over the UTF-16 encoded code points in `iter`,
+    /// returning unpaired surrogates as `Err`s.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::char::decode_utf16;
+    ///
+    /// // 𝄞mus<invalid>ic<invalid>
+    /// let v = [
+    ///     0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+    /// ];
+    ///
+    /// assert_eq!(
+    ///     decode_utf16(v.iter().cloned())
+    ///         .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+    ///         .collect::<Vec<_>>(),
+    ///     vec![
+    ///         Ok('𝄞'),
+    ///         Ok('m'), Ok('u'), Ok('s'),
+    ///         Err(0xDD1E),
+    ///         Ok('i'), Ok('c'),
+    ///         Err(0xD834)
+    ///     ]
+    /// );
+    /// ```
+    ///
+    /// A lossy decoder can be obtained by replacing `Err` results with the replacement character:
+    ///
+    /// ```
+    /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER};
+    ///
+    /// // 𝄞mus<invalid>ic<invalid>
+    /// let v = [
+    ///     0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834,
+    /// ];
+    ///
+    /// assert_eq!(
+    ///     decode_utf16(v.iter().cloned())
+    ///         .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER))
+    ///         .collect::<String>(),
+    ///     "𝄞mus�ic�"
+    /// );
+    /// ```
+    #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
+    #[inline]
+    pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
+        // Associated-function form of the free function in `char::decode`.
+        super::decode::decode_utf16(iter)
+    }
+
+    /// Converts a `u32` to a `char`.
+    ///
+    /// Note that all `char`s are valid [`u32`]s, and can be cast to one with
+    /// `as`:
+    ///
+    /// ```
+    /// let c = '💯';
+    /// let i = c as u32;
+    ///
+    /// assert_eq!(128175, i);
+    /// ```
+    ///
+    /// However, the reverse is not true: not all valid [`u32`]s are valid
+    /// `char`s. `from_u32()` will return `None` if the input is not a valid value
+    /// for a `char`.
+    ///
+    /// [`u32`]: primitive.u32.html
+    ///
+    /// For an unsafe version of this function which ignores these checks, see
+    /// [`from_u32_unchecked`].
+    ///
+    /// [`from_u32_unchecked`]: #method.from_u32_unchecked
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::char;
+    ///
+    /// let c = char::from_u32(0x2764);
+    ///
+    /// assert_eq!(Some('❤'), c);
+    /// ```
+    ///
+    /// Returning `None` when the input is not a valid `char`:
+    ///
+    /// ```
+    /// use std::char;
+    ///
+    /// let c = char::from_u32(0x110000);
+    ///
+    /// assert_eq!(None, c);
+    /// ```
+    #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
+    #[inline]
+    pub fn from_u32(i: u32) -> Option<char> {
+        // Associated-function form of the free function in `char::convert`.
+        super::convert::from_u32(i)
+    }
+
+    /// Converts a `u32` to a `char`, ignoring validity.
+    ///
+    /// Note that all `char`s are valid [`u32`]s, and can be cast to one with
+    /// `as`:
+    ///
+    /// ```
+    /// let c = '💯';
+    /// let i = c as u32;
+    ///
+    /// assert_eq!(128175, i);
+    /// ```
+    ///
+    /// However, the reverse is not true: not all valid [`u32`]s are valid
+    /// `char`s. `from_u32_unchecked()` will ignore this, and blindly cast to
+    /// `char`, possibly creating an invalid one.
+    ///
+    /// [`u32`]: primitive.u32.html
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe, as it may construct invalid `char` values.
+    ///
+    /// For a safe version of this function, see the [`from_u32`] function.
+    ///
+    /// [`from_u32`]: #method.from_u32
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::char;
+    ///
+    /// let c = unsafe { char::from_u32_unchecked(0x2764) };
+    ///
+    /// assert_eq!('❤', c);
+    /// ```
+    #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
+    #[inline]
+    pub unsafe fn from_u32_unchecked(i: u32) -> char {
+        // SAFETY: the safety contract must be upheld by the caller.
+        // Associated-function form of the free function in `char::convert`.
+        unsafe { super::convert::from_u32_unchecked(i) }
+    }
+
+    /// Converts a digit in the given radix to a `char`.
+    ///
+    /// A 'radix' here is sometimes also called a 'base'. A radix of two
+    /// indicates a binary number, a radix of ten, decimal, and a radix of
+    /// sixteen, hexadecimal, to give some common values. Arbitrary
+    /// radices are supported.
+    ///
+    /// `from_digit()` will return `None` if the input is not a digit in
+    /// the given radix.
+    ///
+    /// # Panics
+    ///
+    /// Panics if given a radix larger than 36.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::char;
+    ///
+    /// let c = char::from_digit(4, 10);
+    ///
+    /// assert_eq!(Some('4'), c);
+    ///
+    /// // Decimal 11 is a single digit in base 16
+    /// let c = char::from_digit(11, 16);
+    ///
+    /// assert_eq!(Some('b'), c);
+    /// ```
+    ///
+    /// Returning `None` when the input is not a digit:
+    ///
+    /// ```
+    /// use std::char;
+    ///
+    /// let c = char::from_digit(20, 10);
+    ///
+    /// assert_eq!(None, c);
+    /// ```
+    ///
+    /// Passing a large radix, causing a panic:
+    ///
+    /// ```should_panic
+    /// use std::char;
+    ///
+    /// // this panics
+    /// char::from_digit(1, 37);
+    /// ```
+    #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
+    #[inline]
+    pub fn from_digit(num: u32, radix: u32) -> Option<char> {
+        // Associated-function form of the free function in `char::convert`.
+        super::convert::from_digit(num, radix)
+    }
+
+    /// Checks if a `char` is a digit in the given radix.
+    ///
+    /// A 'radix' here is sometimes also called a 'base'. A radix of two
+    /// indicates a binary number, a radix of ten, decimal, and a radix of
+    /// sixteen, hexadecimal, to give some common values. Arbitrary
+    /// radices are supported.
+    ///
+    /// Compared to `is_numeric()`, this function only recognizes the characters
+    /// `0-9`, `a-z` and `A-Z`.
+    ///
+    /// 'Digit' is defined to be only the following characters:
+    ///
+    /// * `0-9`
+    /// * `a-z`
+    /// * `A-Z`
+    ///
+    /// For a more comprehensive understanding of 'digit', see [`is_numeric`][is_numeric].
+    ///
+    /// [is_numeric]: #method.is_numeric
+    ///
+    /// # Panics
+    ///
+    /// Panics if given a radix larger than 36.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// assert!('1'.is_digit(10));
+    /// assert!('f'.is_digit(16));
+    /// assert!(!'f'.is_digit(10));
+    /// ```
+    ///
+    /// Passing a large radix, causing a panic:
+    ///
+    /// ```should_panic
+    /// // this panics
+    /// '1'.is_digit(37);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline]
+    pub fn is_digit(self, radix: u32) -> bool {
+        // A char is a digit exactly when `to_digit` can convert it; the
+        // radix > 36 panic is inherited from `to_digit`.
+        self.to_digit(radix).is_some()
+    }
+
+    /// Converts a `char` to a digit in the given radix.
+    ///
+    /// A 'radix' here is sometimes also called a 'base'. A radix of two
+    /// indicates a binary number, a radix of ten, decimal, and a radix of
+    /// sixteen, hexadecimal, to give some common values. Arbitrary
+    /// radices are supported.
+    ///
+    /// 'Digit' is defined to be only the following characters:
+    ///
+    /// * `0-9`
+    /// * `a-z`
+    /// * `A-Z`
+    ///
+    /// # Errors
+    ///
+    /// Returns `None` if the `char` does not refer to a digit in the given radix.
+    ///
+    /// # Panics
+    ///
+    /// Panics if given a radix larger than 36.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// assert_eq!('1'.to_digit(10), Some(1));
+    /// assert_eq!('f'.to_digit(16), Some(15));
+    /// ```
+    ///
+    /// Passing a non-digit results in failure:
+    ///
+    /// ```
+    /// assert_eq!('f'.to_digit(10), None);
+    /// assert_eq!('z'.to_digit(16), None);
+    /// ```
+    ///
+    /// Passing a large radix, causing a panic:
+    ///
+    /// ```should_panic
+    /// // this panics
+    /// '1'.to_digit(37);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline]
+    pub fn to_digit(self, radix: u32) -> Option<u32> {
+        assert!(radix <= 36, "to_digit: radix is too high (maximum 36)");
+
+        // the code is split up here to improve execution speed for cases where
+        // the `radix` is constant and 10 or smaller
+        let val = if radix <= 10 {
+            match self {
+                '0'..='9' => self as u32 - '0' as u32,
+                _ => return None,
+            }
+        } else {
+            match self {
+                '0'..='9' => self as u32 - '0' as u32,
+                'a'..='z' => self as u32 - 'a' as u32 + 10,
+                'A'..='Z' => self as u32 - 'A' as u32 + 10,
+                _ => return None,
+            }
+        };
+
+        // The range match above over-accepts for radices 11..=35
+        // (e.g. 'z' in radix 16); this final bound check rejects those.
+        if val < radix { Some(val) } else { None }
+    }
+
+    /// Returns an iterator that yields the hexadecimal Unicode escape of a
+    /// character as `char`s.
+    ///
+    /// This will escape characters with the Rust syntax of the form
+    /// `\u{NNNNNN}` where `NNNNNN` is a hexadecimal representation.
+    ///
+    /// # Examples
+    ///
+    /// As an iterator:
+    ///
+    /// ```
+    /// for c in '❤'.escape_unicode() {
+    ///     print!("{}", c);
+    /// }
+    /// println!();
+    /// ```
+    ///
+    /// Using `println!` directly:
+    ///
+    /// ```
+    /// println!("{}", '❤'.escape_unicode());
+    /// ```
+    ///
+    /// Both are equivalent to:
+    ///
+    /// ```
+    /// println!("\\u{{2764}}");
+    /// ```
+    ///
+    /// Using `to_string`:
+    ///
+    /// ```
+    /// assert_eq!('❤'.escape_unicode().to_string(), "\\u{2764}");
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline]
+    pub fn escape_unicode(self) -> EscapeUnicode {
+        let c = self as u32;
+
+        // or-ing 1 ensures that for c==0 the code computes that one
+        // digit should be printed and (which is the same) avoids the
+        // (31 - 32) underflow
+        let msb = 31 - (c | 1).leading_zeros();
+
+        // the index of the most significant hex digit
+        let ms_hex_digit = msb / 4;
+        // The iterator state starts at the backslash and counts hex digits
+        // down from the most significant one.
+        EscapeUnicode {
+            c: self,
+            state: EscapeUnicodeState::Backslash,
+            hex_digit_idx: ms_hex_digit as usize,
+        }
+    }
+
+    /// An extended version of `escape_debug` that optionally permits escaping
+    /// Extended Grapheme codepoints. This allows us to format characters like
+    /// nonspacing marks better when they're at the start of a string.
+    #[inline]
+    pub(crate) fn escape_debug_ext(self, escape_grapheme_extended: bool) -> EscapeDebug {
+        // Arm order matters: the explicit backslash escapes must be tried
+        // before the grapheme-extended and printable checks.
+        let init_state = match self {
+            '\t' => EscapeDefaultState::Backslash('t'),
+            '\r' => EscapeDefaultState::Backslash('r'),
+            '\n' => EscapeDefaultState::Backslash('n'),
+            '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self),
+            _ if escape_grapheme_extended && self.is_grapheme_extended() => {
+                EscapeDefaultState::Unicode(self.escape_unicode())
+            }
+            _ if is_printable(self) => EscapeDefaultState::Char(self),
+            _ => EscapeDefaultState::Unicode(self.escape_unicode()),
+        };
+        EscapeDebug(EscapeDefault { state: init_state })
+    }
+
+    /// Returns an iterator that yields the literal escape code of a character
+    /// as `char`s.
+    ///
+    /// This will escape the characters similar to the `Debug` implementations
+    /// of `str` or `char`.
+    ///
+    /// # Examples
+    ///
+    /// As an iterator:
+    ///
+    /// ```
+    /// for c in '\n'.escape_debug() {
+    ///     print!("{}", c);
+    /// }
+    /// println!();
+    /// ```
+    ///
+    /// Using `println!` directly:
+    ///
+    /// ```
+    /// println!("{}", '\n'.escape_debug());
+    /// ```
+    ///
+    /// Both are equivalent to:
+    ///
+    /// ```
+    /// println!("\\n");
+    /// ```
+    ///
+    /// Using `to_string`:
+    ///
+    /// ```
+    /// assert_eq!('\n'.escape_debug().to_string(), "\\n");
+    /// ```
+    #[stable(feature = "char_escape_debug", since = "1.20.0")]
+    #[inline]
+    pub fn escape_debug(self) -> EscapeDebug {
+        // `true`: also escape Extended Grapheme codepoints (see escape_debug_ext).
+        self.escape_debug_ext(true)
+    }
+
+    /// Returns an iterator that yields the literal escape code of a character
+    /// as `char`s.
+    ///
+    /// The default is chosen with a bias toward producing literals that are
+    /// legal in a variety of languages, including C++11 and similar C-family
+    /// languages. The exact rules are:
+    ///
+    /// * Tab is escaped as `\t`.
+    /// * Carriage return is escaped as `\r`.
+    /// * Line feed is escaped as `\n`.
+    /// * Single quote is escaped as `\'`.
+    /// * Double quote is escaped as `\"`.
+    /// * Backslash is escaped as `\\`.
+    /// * Any character in the 'printable ASCII' range `0x20` .. `0x7e`
+    ///   inclusive is not escaped.
+    /// * All other characters are given hexadecimal Unicode escapes; see
+    ///   [`escape_unicode`][escape_unicode].
+    ///
+    /// [escape_unicode]: #method.escape_unicode
+    ///
+    /// # Examples
+    ///
+    /// As an iterator:
+    ///
+    /// ```
+    /// for c in '"'.escape_default() {
+    ///     print!("{}", c);
+    /// }
+    /// println!();
+    /// ```
+    ///
+    /// Using `println!` directly:
+    ///
+    /// ```
+    /// println!("{}", '"'.escape_default());
+    /// ```
+    ///
+    ///
+    /// Both are equivalent to:
+    ///
+    /// ```
+    /// println!("\\\"");
+    /// ```
+    ///
+    /// Using `to_string`:
+    ///
+    /// ```
+    /// assert_eq!('"'.escape_default().to_string(), "\\\"");
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline]
+    pub fn escape_default(self) -> EscapeDefault {
+        // The match arms mirror the rule list in the doc comment, in order.
+        let init_state = match self {
+            '\t' => EscapeDefaultState::Backslash('t'),
+            '\r' => EscapeDefaultState::Backslash('r'),
+            '\n' => EscapeDefaultState::Backslash('n'),
+            '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self),
+            '\x20'..='\x7e' => EscapeDefaultState::Char(self),
+            _ => EscapeDefaultState::Unicode(self.escape_unicode()),
+        };
+        EscapeDefault { state: init_state }
+    }
+
+    /// Returns the number of bytes this `char` would need if encoded in UTF-8.
+    ///
+    /// That number of bytes is always between 1 and 4, inclusive.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let len = 'A'.len_utf8();
+    /// assert_eq!(len, 1);
+    ///
+    /// let len = 'ß'.len_utf8();
+    /// assert_eq!(len, 2);
+    ///
+    /// let len = 'ℝ'.len_utf8();
+    /// assert_eq!(len, 3);
+    ///
+    /// let len = '💣'.len_utf8();
+    /// assert_eq!(len, 4);
+    /// ```
+    ///
+    /// The `&str` type guarantees that its contents are UTF-8, and so we can compare the length it
+    /// would take if each code point was represented as a `char` vs in the `&str` itself:
+    ///
+    /// ```
+    /// // as chars
+    /// let eastern = '東';
+    /// let capital = '京';
+    ///
+    /// // both can be represented as three bytes
+    /// assert_eq!(3, eastern.len_utf8());
+    /// assert_eq!(3, capital.len_utf8());
+    ///
+    /// // as a &str, these two are encoded in UTF-8
+    /// let tokyo = "東京";
+    ///
+    /// let len = eastern.len_utf8() + capital.len_utf8();
+    ///
+    /// // we can see that they take six bytes total...
+    /// assert_eq!(6, tokyo.len());
+    ///
+    /// // ... just like the &str
+    /// assert_eq!(len, tokyo.len());
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[inline]
+    pub fn len_utf8(self) -> usize {
+        // Delegates to the module-level `len_utf8` helper (defined elsewhere
+        // in this module), shared with the encoding routines.
+        len_utf8(self as u32)
+    }
+
+ /// Returns the number of 16-bit code units this `char` would need if
+ /// encoded in UTF-16.
+ ///
+ /// The answer is always 1 (a code point in the Basic Multilingual Plane)
+ /// or 2 (a supplementary-plane code point, which requires a surrogate
+ /// pair). See the documentation for [`len_utf8`] for the UTF-8 mirror of
+ /// this concept.
+ ///
+ /// [`len_utf8`]: #method.len_utf8
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!('ß'.len_utf16(), 1);
+ /// assert_eq!('💣'.len_utf16(), 2);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn len_utf16(self) -> usize {
+     // Code points at or below U+FFFF fit in one code unit; anything
+     // larger is split into a surrogate pair.
+     if (self as u32) <= 0xFFFF { 1 } else { 2 }
+ }
+
+ /// Encodes this character as UTF-8 into the provided byte buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length four is large enough to encode any `char`.
+ ///
+ /// # Examples
+ ///
+ /// In both of these examples, 'ß' takes two bytes to encode.
+ ///
+ /// ```
+ /// let mut b = [0; 2];
+ ///
+ /// let result = 'ß'.encode_utf8(&mut b);
+ ///
+ /// assert_eq!(result, "ß");
+ ///
+ /// assert_eq!(result.len(), 2);
+ /// ```
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```should_panic
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// 'ß'.encode_utf8(&mut b);
+ /// ```
+ #[stable(feature = "unicode_encode_char", since = "1.15.0")]
+ #[inline]
+ pub fn encode_utf8(self, dst: &mut [u8]) -> &mut str {
+ // `encode_utf8_raw` performs the length check (panicking with a
+ // descriptive message if `dst` is too short) and the byte writes.
+ // SAFETY: `char` is not a surrogate, so this is valid UTF-8.
+ unsafe { from_utf8_unchecked_mut(encode_utf8_raw(self as u32, dst)) }
+ }
+
+ /// Encodes this character as UTF-16 into the provided `u16` buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length 2 is large enough to encode any `char`.
+ ///
+ /// # Examples
+ ///
+ /// In both of these examples, '𝕊' takes two `u16`s to encode.
+ ///
+ /// ```
+ /// let mut b = [0; 2];
+ ///
+ /// let result = '𝕊'.encode_utf16(&mut b);
+ ///
+ /// assert_eq!(result.len(), 2);
+ /// ```
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```should_panic
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// '𝕊'.encode_utf16(&mut b);
+ /// ```
+ #[stable(feature = "unicode_encode_char", since = "1.15.0")]
+ #[inline]
+ pub fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] {
+ // The raw helper does the bounds check and, for supplementary-plane
+ // code points, the split into a surrogate pair.
+ encode_utf16_raw(self as u32, dst)
+ }
+
+ /// Returns `true` if this `char` has the `Alphabetic` property.
+ ///
+ /// `Alphabetic` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('a'.is_alphabetic());
+ /// assert!('京'.is_alphabetic());
+ ///
+ /// // love is many things, but it is not alphabetic
+ /// assert!(!'💝'.is_alphabetic());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_alphabetic(self) -> bool {
+     // Fast path: ASCII is decided without touching the Unicode tables.
+     if self.is_ascii() { self.is_ascii_alphabetic() } else { unicode::Alphabetic(self) }
+ }
+
+ /// Returns `true` if this `char` has the `Lowercase` property.
+ ///
+ /// `Lowercase` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('a'.is_lowercase());
+ /// assert!('δ'.is_lowercase());
+ /// assert!(!'A'.is_lowercase());
+ /// assert!(!'Δ'.is_lowercase());
+ ///
+ /// // Caseless scripts and punctuation are not lowercase:
+ /// assert!(!'中'.is_lowercase());
+ /// assert!(!' '.is_lowercase());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_lowercase(self) -> bool {
+     // Fast path: ASCII is decided without touching the Unicode tables.
+     if self.is_ascii() { self.is_ascii_lowercase() } else { unicode::Lowercase(self) }
+ }
+
+ /// Returns `true` if this `char` has the `Uppercase` property.
+ ///
+ /// `Uppercase` is described in Chapter 4 (Character Properties) of the [Unicode Standard] and
+ /// specified in the [Unicode Character Database][ucd] [`DerivedCoreProperties.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.is_uppercase());
+ /// assert!('Δ'.is_uppercase());
+ /// assert!(!'a'.is_uppercase());
+ /// assert!(!'δ'.is_uppercase());
+ ///
+ /// // Caseless scripts and punctuation are not uppercase:
+ /// assert!(!'中'.is_uppercase());
+ /// assert!(!' '.is_uppercase());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_uppercase(self) -> bool {
+     // Fast path: ASCII is decided without touching the Unicode tables.
+     if self.is_ascii() { self.is_ascii_uppercase() } else { unicode::Uppercase(self) }
+ }
+
+ /// Returns `true` if this `char` has the `White_Space` property.
+ ///
+ /// `White_Space` is specified in the [Unicode Character Database][ucd] [`PropList.txt`].
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`PropList.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/PropList.txt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(' '.is_whitespace());
+ ///
+ /// // a non-breaking space
+ /// assert!('\u{A0}'.is_whitespace());
+ ///
+ /// assert!(!'越'.is_whitespace());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_whitespace(self) -> bool {
+     match self {
+         // ASCII whitespace: SPACE plus the HT..CR control run
+         // (tab, line feed, vertical tab, form feed, carriage return).
+         ' ' | '\t' | '\n' | '\x0b' | '\x0c' | '\r' => true,
+         // No other ASCII character has the White_Space property.
+         c if c.is_ascii() => false,
+         // Everything else consults the Unicode table.
+         c => unicode::White_Space(c),
+     }
+ }
+
+ /// Returns `true` if this `char` satisfies either [`is_alphabetic()`] or [`is_numeric()`].
+ ///
+ /// [`is_alphabetic()`]: #method.is_alphabetic
+ /// [`is_numeric()`]: #method.is_numeric
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('7'.is_alphanumeric());
+ /// assert!('K'.is_alphanumeric());
+ /// assert!('و'.is_alphanumeric());
+ /// assert!('藏'.is_alphanumeric());
+ /// assert!('¾'.is_alphanumeric());
+ /// assert!('①'.is_alphanumeric());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_alphanumeric(self) -> bool {
+     // Alphabetic wins first; otherwise fall back to the numeric check.
+     if self.is_alphabetic() { true } else { self.is_numeric() }
+ }
+
+ /// Returns `true` if this `char` has the general category for control codes.
+ ///
+ /// Control codes (code points with the general category of `Cc`) are described in Chapter 4
+ /// (Character Properties) of the [Unicode Standard] and specified in the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`].
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // U+009C, STRING TERMINATOR
+ /// assert!('\u{009c}'.is_control());
+ /// assert!(!'q'.is_control());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_control(self) -> bool {
+ // Consult the generated `Cc` (control) category table directly.
+ unicode::Cc(self)
+ }
+
+ /// Returns `true` if this `char` has the `Grapheme_Extend` property.
+ ///
+ /// `Grapheme_Extend` is described in [Unicode Standard Annex #29 (Unicode Text
+ /// Segmentation)][uax29] and specified in the [Unicode Character Database][ucd]
+ /// [`DerivedCoreProperties.txt`].
+ ///
+ /// [uax29]: https://www.unicode.org/reports/tr29/
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`DerivedCoreProperties.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt
+ // Crate-private (note the missing stability attribute): a thin wrapper
+ // over the generated `Grapheme_Extend` table, not part of the public API.
+ #[inline]
+ pub(crate) fn is_grapheme_extended(self) -> bool {
+ unicode::Grapheme_Extend(self)
+ }
+
+ /// Returns `true` if this `char` has one of the general categories for numbers.
+ ///
+ /// The general categories for numbers (`Nd` for decimal digits, `Nl` for letter-like numeric
+ /// characters, and `No` for other numeric characters) are specified in the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`].
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('7'.is_numeric());
+ /// assert!('٣'.is_numeric());
+ /// assert!('¾'.is_numeric());
+ /// assert!('①'.is_numeric());
+ /// assert!(!'K'.is_numeric());
+ /// assert!(!'藏'.is_numeric());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn is_numeric(self) -> bool {
+     // In ASCII only '0'..='9' are numeric; non-ASCII consults the
+     // combined `N` (number) category table.
+     if self.is_ascii() { self.is_ascii_digit() } else { unicode::N(self) }
+ }
+
+ /// Returns an iterator that yields the lowercase mapping of this `char` as one or more
+ /// `char`s.
+ ///
+ /// If this `char` does not have a lowercase mapping, the iterator yields the same `char`.
+ ///
+ /// If this `char` has a one-to-one lowercase mapping given by the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`], the iterator yields that `char`.
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields
+ /// the `char`(s) given by [`SpecialCasing.txt`].
+ ///
+ /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt
+ ///
+ /// This operation performs an unconditional mapping without tailoring. That is, the conversion
+ /// is independent of context and language.
+ ///
+ /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in
+ /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion.
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in 'İ'.to_lowercase() {
+ /// print!("{}", c);
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", 'İ'.to_lowercase());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("i\u{307}");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!('C'.to_lowercase().to_string(), "c");
+ ///
+ /// // Sometimes the result is more than one character:
+ /// assert_eq!('İ'.to_lowercase().to_string(), "i\u{307}");
+ ///
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
+ /// assert_eq!('山'.to_lowercase().to_string(), "山");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_lowercase(self) -> ToLowercase {
+ // `conversions::to_lower` supplies the mapping (possibly several
+ // chars, per SpecialCasing.txt); `CaseMappingIter` yields it in order.
+ ToLowercase(CaseMappingIter::new(conversions::to_lower(self)))
+ }
+
+ /// Returns an iterator that yields the uppercase mapping of this `char` as one or more
+ /// `char`s.
+ ///
+ /// If this `char` does not have a uppercase mapping, the iterator yields the same `char`.
+ ///
+ /// If this `char` has a one-to-one uppercase mapping given by the [Unicode Character
+ /// Database][ucd] [`UnicodeData.txt`], the iterator yields that `char`.
+ ///
+ /// [ucd]: https://www.unicode.org/reports/tr44/
+ /// [`UnicodeData.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
+ ///
+ /// If this `char` requires special considerations (e.g. multiple `char`s) the iterator yields
+ /// the `char`(s) given by [`SpecialCasing.txt`].
+ ///
+ /// [`SpecialCasing.txt`]: https://www.unicode.org/Public/UCD/latest/ucd/SpecialCasing.txt
+ ///
+ /// This operation performs an unconditional mapping without tailoring. That is, the conversion
+ /// is independent of context and language.
+ ///
+ /// In the [Unicode Standard], Chapter 4 (Character Properties) discusses case mapping in
+ /// general and Chapter 3 (Conformance) discusses the default algorithm for case conversion.
+ ///
+ /// [Unicode Standard]: https://www.unicode.org/versions/latest/
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in 'ß'.to_uppercase() {
+ /// print!("{}", c);
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", 'ß'.to_uppercase());
+ /// ```
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("SS");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!('c'.to_uppercase().to_string(), "C");
+ ///
+ /// // Sometimes the result is more than one character:
+ /// assert_eq!('ß'.to_uppercase().to_string(), "SS");
+ ///
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
+ /// assert_eq!('山'.to_uppercase().to_string(), "山");
+ /// ```
+ ///
+ /// # Note on locale
+ ///
+ /// In Turkish, the equivalent of 'i' in Latin has five forms instead of two:
+ ///
+ /// * 'Dotless': I / ı, sometimes written ï
+ /// * 'Dotted': İ / i
+ ///
+ /// Note that the lowercase dotted 'i' is the same as the Latin. Therefore:
+ ///
+ /// ```
+ /// let upper_i = 'i'.to_uppercase().to_string();
+ /// ```
+ ///
+ /// The value of `upper_i` here relies on the language of the text: if we're
+ /// in `en-US`, it should be `"I"`, but if we're in `tr_TR`, it should
+ /// be `"İ"`. `to_uppercase()` does not take this into account, and so:
+ ///
+ /// ```
+ /// let upper_i = 'i'.to_uppercase().to_string();
+ ///
+ /// assert_eq!(upper_i, "I");
+ /// ```
+ ///
+ /// holds across languages.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_uppercase(self) -> ToUppercase {
+ // `conversions::to_upper` supplies the mapping (possibly several
+ // chars, per SpecialCasing.txt); `CaseMappingIter` yields it in order.
+ ToUppercase(CaseMappingIter::new(conversions::to_upper(self)))
+ }
+
+ /// Checks if the value is within the ASCII range.
+ ///
+ /// ASCII covers exactly the first 128 code points, U+0000 ..= U+007F.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('a'.is_ascii());
+ /// assert!(!'❤'.is_ascii());
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.32.0")]
+ #[inline]
+ pub const fn is_ascii(&self) -> bool {
+     // ASCII is exactly the code points strictly below 0x80.
+     (*self as u32) < 0x80
+ }
+
+ /// Makes a copy of the value in its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z'; every other
+ /// character, including all non-ASCII ones, is returned unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ /// To uppercase beyond ASCII, use [`to_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!('A', 'a'.to_ascii_uppercase());
+ /// assert_eq!('❤', '❤'.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
+ /// [`to_uppercase`]: #method.to_uppercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> char {
+     match *self {
+         // Only ASCII lowercase letters change; the byte-level helper
+         // performs the actual a-z -> A-Z mapping.
+         'a'..='z' => (*self as u8).to_ascii_uppercase() as char,
+         c => c,
+     }
+ }
+
+ /// Makes a copy of the value in its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z'; every other
+ /// character, including all non-ASCII ones, is returned unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ /// To lowercase beyond ASCII, use [`to_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!('a', 'A'.to_ascii_lowercase());
+ /// assert_eq!('❤', '❤'.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
+ /// [`to_lowercase`]: #method.to_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> char {
+     match *self {
+         // Only ASCII uppercase letters change; the byte-level helper
+         // performs the actual A-Z -> a-z mapping.
+         'A'..='Z' => (*self as u8).to_ascii_lowercase() as char,
+         c => c,
+     }
+ }
+
+ /// Checks that two values are an ASCII case-insensitive match.
+ ///
+ /// Equivalent to comparing the two characters after applying
+ /// [`to_ascii_lowercase`] to each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.eq_ignore_ascii_case(&'a'));
+ /// assert!('A'.eq_ignore_ascii_case(&'A'));
+ /// assert!(!'A'.eq_ignore_ascii_case(&'z'));
+ /// ```
+ ///
+ /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &char) -> bool {
+     // Fold both sides to ASCII lowercase, then compare exactly.
+     let folded_self = self.to_ascii_lowercase();
+     let folded_other = other.to_ascii_lowercase();
+     folded_self == folded_other
+ }
+
+ /// Converts this type to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut ascii = 'a';
+ ///
+ /// ascii.make_ascii_uppercase();
+ ///
+ /// assert_eq!('A', ascii);
+ /// ```
+ ///
+ /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ // `to_ascii_uppercase` only changes ASCII letters, so any other
+ // value is simply written back unchanged.
+ *self = self.to_ascii_uppercase();
+ }
+
+ /// Converts this type to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut ascii = 'A';
+ ///
+ /// ascii.make_ascii_lowercase();
+ ///
+ /// assert_eq!('a', ascii);
+ /// ```
+ ///
+ /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ // `to_ascii_lowercase` only changes ASCII letters, so any other
+ // value is simply written back unchanged.
+ *self = self.to_ascii_lowercase();
+ }
+
+ /// Checks if the value is an ASCII alphabetic character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.is_ascii_alphabetic());
+ /// assert!('g'.is_ascii_alphabetic());
+ /// assert!(!'0'.is_ascii_alphabetic());
+ /// assert!(!'%'.is_ascii_alphabetic());
+ /// assert!(!' '.is_ascii_alphabetic());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphabetic(&self) -> bool {
+     // An ASCII letter is either an uppercase or a lowercase one.
+     self.is_ascii_uppercase() || self.is_ascii_lowercase()
+ }
+
+ /// Checks if the value is an ASCII uppercase character:
+ /// U+0041 'A' ..= U+005A 'Z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.is_ascii_uppercase());
+ /// assert!(!'a'.is_ascii_uppercase());
+ /// assert!(!'0'.is_ascii_uppercase());
+ /// assert!(!'%'.is_ascii_uppercase());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_uppercase(&self) -> bool {
+     match *self {
+         'A'..='Z' => true,
+         _ => false,
+     }
+ }
+
+ /// Checks if the value is an ASCII lowercase character:
+ /// U+0061 'a' ..= U+007A 'z'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('a'.is_ascii_lowercase());
+ /// assert!(!'A'.is_ascii_lowercase());
+ /// assert!(!'0'.is_ascii_lowercase());
+ /// assert!(!'%'.is_ascii_lowercase());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_lowercase(&self) -> bool {
+     match *self {
+         'a'..='z' => true,
+         _ => false,
+     }
+ }
+
+ /// Checks if the value is an ASCII alphanumeric character:
+ ///
+ /// - U+0041 'A' ..= U+005A 'Z', or
+ /// - U+0061 'a' ..= U+007A 'z', or
+ /// - U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.is_ascii_alphanumeric());
+ /// assert!('g'.is_ascii_alphanumeric());
+ /// assert!('0'.is_ascii_alphanumeric());
+ /// assert!(!'%'.is_ascii_alphanumeric());
+ /// assert!(!' '.is_ascii_alphanumeric());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_alphanumeric(&self) -> bool {
+     // Alphanumeric = ASCII letter or ASCII decimal digit.
+     self.is_ascii_alphabetic() || self.is_ascii_digit()
+ }
+
+ /// Checks if the value is an ASCII decimal digit:
+ /// U+0030 '0' ..= U+0039 '9'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('0'.is_ascii_digit());
+ /// assert!(!'A'.is_ascii_digit());
+ /// assert!(!'a'.is_ascii_digit());
+ /// assert!(!'%'.is_ascii_digit());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_digit(&self) -> bool {
+     match *self {
+         '0'..='9' => true,
+         _ => false,
+     }
+ }
+
+ /// Checks if the value is an ASCII hexadecimal digit:
+ ///
+ /// - U+0030 '0' ..= U+0039 '9', or
+ /// - U+0041 'A' ..= U+0046 'F', or
+ /// - U+0061 'a' ..= U+0066 'f'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('0'.is_ascii_hexdigit());
+ /// assert!('A'.is_ascii_hexdigit());
+ /// assert!('a'.is_ascii_hexdigit());
+ /// assert!(!'G'.is_ascii_hexdigit());
+ /// assert!(!'g'.is_ascii_hexdigit());
+ /// assert!(!'%'.is_ascii_hexdigit());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_hexdigit(&self) -> bool {
+     // A hex digit is a decimal digit or one of the first six letters,
+     // in either case.
+     self.is_ascii_digit() || matches!(*self, 'A'..='F' | 'a'..='f')
+ }
+
+ /// Checks if the value is an ASCII punctuation character:
+ ///
+ /// - U+0021 ..= U+002F `! " # $ % & ' ( ) * + , - . /`, or
+ /// - U+003A ..= U+0040 `: ; < = > ? @`, or
+ /// - U+005B ..= U+0060 ``[ \ ] ^ _ ` ``, or
+ /// - U+007B ..= U+007E `{ | } ~`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('%'.is_ascii_punctuation());
+ /// assert!(!'A'.is_ascii_punctuation());
+ /// assert!(!'0'.is_ascii_punctuation());
+ /// assert!(!' '.is_ascii_punctuation());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_punctuation(&self) -> bool {
+     // The four punctuation runs are the gaps between digits, uppercase
+     // letters, and lowercase letters in the ASCII table.
+     match *self {
+         '!'..='/' | ':'..='@' | '['..='`' | '{'..='~' => true,
+         _ => false,
+     }
+ }
+
+ /// Checks if the value is an ASCII graphic character:
+ /// U+0021 '!' ..= U+007E '~'.
+ ///
+ /// Graphic characters are everything printable except SPACE.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('A'.is_ascii_graphic());
+ /// assert!('%'.is_ascii_graphic());
+ /// assert!(!' '.is_ascii_graphic());
+ /// assert!(!'\n'.is_ascii_graphic());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_graphic(&self) -> bool {
+     match *self {
+         '!'..='~' => true,
+         _ => false,
+     }
+ }
+
+ /// Checks if the value is an ASCII whitespace character:
+ /// U+0020 SPACE, U+0009 HORIZONTAL TAB, U+000A LINE FEED,
+ /// U+000C FORM FEED, or U+000D CARRIAGE RETURN.
+ ///
+ /// Rust uses the WhatWG Infra Standard's [definition of ASCII
+ /// whitespace][infra-aw]. There are several other definitions in
+ /// wide use. For instance, [the POSIX locale][pct] includes
+ /// U+000B VERTICAL TAB as well as all the above characters,
+ /// but—from the very same specification—[the default rule for
+ /// "field splitting" in the Bourne shell][bfs] considers *only*
+ /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace.
+ ///
+ /// If you are writing a program that will process an existing
+ /// file format, check what that format's definition of whitespace is
+ /// before using this function.
+ ///
+ /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace
+ /// [pct]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01
+ /// [bfs]: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!(' '.is_ascii_whitespace());
+ /// assert!('\t'.is_ascii_whitespace());
+ /// assert!('\n'.is_ascii_whitespace());
+ /// assert!(!'A'.is_ascii_whitespace());
+ /// // VERTICAL TAB is deliberately excluded:
+ /// assert!(!'\u{b}'.is_ascii_whitespace());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_whitespace(&self) -> bool {
+     // TAB..=LF and FF..=CR deliberately skip U+000B VERTICAL TAB,
+     // per the WhatWG Infra definition.
+     matches!(*self, ' ' | '\t'..='\n' | '\x0C'..='\r')
+ }
+
+ /// Checks if the value is an ASCII control character:
+ /// U+0000 NUL ..= U+001F UNIT SEPARATOR, or U+007F DELETE.
+ /// Note that most ASCII whitespace characters are control
+ /// characters, but SPACE is not.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!('\n'.is_ascii_control());
+ /// assert!('\u{1b}'.is_ascii_control());
+ /// assert!(!'A'.is_ascii_control());
+ /// assert!(!' '.is_ascii_control());
+ /// ```
+ #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+ #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+ #[inline]
+ pub const fn is_ascii_control(&self) -> bool {
+     // The C0 control block, plus the lone DEL at the top of ASCII.
+     (*self as u32) < 0x20 || (*self as u32) == 0x7F
+ }
+}
+
+ /// Returns how many bytes the UTF-8 encoding of the code point `code`
+ /// occupies, by comparing against the one-/two-/three-byte limits.
+ ///
+ /// NOTE(review): callers pass raw `u32`s (including surrogates, via
+ /// `encode_utf8_raw`); values above the Unicode range would report 4.
+ #[inline]
+ fn len_utf8(code: u32) -> usize {
+ if code < MAX_ONE_B {
+ 1
+ } else if code < MAX_TWO_B {
+ 2
+ } else if code < MAX_THREE_B {
+ 3
+ } else {
+ 4
+ }
+ }
+
+ /// Encodes a raw u32 value as UTF-8 into the provided byte buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
+ ///
+ /// Unlike `char::encode_utf8`, this method also handles codepoints in the surrogate range.
+ /// (Creating a `char` in the surrogate range is UB.)
+ /// The result is valid [generalized UTF-8] but not valid UTF-8.
+ ///
+ /// [generalized UTF-8]: https://simonsapin.github.io/wtf-8/#generalized-utf8
+ ///
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length four is large enough to encode any `char`.
+ #[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+ #[doc(hidden)]
+ #[inline]
+ pub fn encode_utf8_raw(code: u32, dst: &mut [u8]) -> &mut [u8] {
+ let len = len_utf8(code);
+ // Match on the required length AND a slice pattern at least that long,
+ // so each arm's writes are known to be in bounds.
+ match (len, &mut dst[..]) {
+ // 1 byte: plain ASCII, 0xxxxxxx.
+ (1, [a, ..]) => {
+ *a = code as u8;
+ }
+ // 2 bytes: 110xxxxx 10xxxxxx.
+ (2, [a, b, ..]) => {
+ *a = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
+ *b = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx.
+ (3, [a, b, c, ..]) => {
+ *a = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
+ *b = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *c = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ // 4 bytes: 11110xxx followed by three continuation bytes.
+ (4, [a, b, c, d, ..]) => {
+ *a = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
+ *b = (code >> 12 & 0x3F) as u8 | TAG_CONT;
+ *c = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *d = (code & 0x3F) as u8 | TAG_CONT;
+ }
+ // Buffer shorter than the encoding requires.
+ _ => panic!(
+ "encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}",
+ len,
+ code,
+ dst.len(),
+ ),
+ };
+ &mut dst[..len]
+ }
+
+/// Encodes a raw u32 value as UTF-16 into the provided `u16` buffer,
+/// and then returns the subslice of the buffer that contains the encoded character.
+///
+/// Unlike `char::encode_utf16`, this method also handles codepoints in the surrogate range.
+/// (Creating a `char` in the surrogate range is UB.)
+///
+/// # Panics
+///
+/// Panics if the buffer is not large enough.
+/// A buffer of length 2 is large enough to encode any `char`.
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+#[doc(hidden)]
+#[inline]
+pub fn encode_utf16_raw(mut code: u32, dst: &mut [u16]) -> &mut [u16] {
+ // SAFETY: each branch first checks that `dst` has room for the units it
+ // writes with `get_unchecked_mut` / reads back with `from_raw_parts_mut`.
+ unsafe {
+ if (code & 0xFFFF) == code && !dst.is_empty() {
+ // The BMP falls through
+ *dst.get_unchecked_mut(0) = code as u16;
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 1)
+ } else if dst.len() >= 2 {
+ // Supplementary planes break into surrogates.
+ code -= 0x1_0000;
+ *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16);
+ *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF);
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 2)
+ } else {
+ // Compute the required length arithmetically rather than via
+ // `from_u32_unchecked(code).len_utf16()`: `code` may be a lone
+ // surrogate here, and materializing a surrogate `char` is UB
+ // (as this function's own documentation states).
+ let len = if (code & 0xFFFF) == code { 1 } else { 2 };
+ panic!(
+ "encode_utf16: need {} units to encode U+{:X}, but the buffer has {}",
+ len,
+ code,
+ dst.len(),
+ )
+ }
+ }
+}
--- /dev/null
+//! A character type.
+//!
+//! The `char` type represents a single character. More specifically, since
+//! 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode
+//! scalar value]', which is similar to, but not the same as, a '[Unicode code
+//! point]'.
+//!
+//! [Unicode scalar value]: http://www.unicode.org/glossary/#unicode_scalar_value
+//! [Unicode code point]: http://www.unicode.org/glossary/#code_point
+//!
+//! This module exists for technical reasons, the primary documentation for
+//! `char` is directly on [the `char` primitive type](../../std/primitive.char.html)
+//! itself.
+//!
+//! This module is the home of the iterator implementations for the iterators
+//! implemented on `char`, as well as some useful constants and conversion
+//! functions that convert various types to `char`.
+
+#![allow(non_snake_case)]
+#![stable(feature = "core_char", since = "1.2.0")]
+
+mod convert;
+mod decode;
+mod methods;
+
+// stable re-exports
+#[stable(feature = "char_from_unchecked", since = "1.5.0")]
+pub use self::convert::from_u32_unchecked;
+#[stable(feature = "try_from", since = "1.34.0")]
+pub use self::convert::CharTryFromError;
+#[stable(feature = "char_from_str", since = "1.20.0")]
+pub use self::convert::ParseCharError;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::convert::{from_digit, from_u32};
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+pub use self::decode::{decode_utf16, DecodeUtf16, DecodeUtf16Error};
+#[stable(feature = "unicode_version", since = "1.45.0")]
+pub use crate::unicode::UNICODE_VERSION;
+
+// perma-unstable re-exports
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+pub use self::methods::encode_utf16_raw;
+#[unstable(feature = "char_internals", reason = "exposed only for libstd", issue = "none")]
+pub use self::methods::encode_utf8_raw;
+
+use crate::fmt::{self, Write};
+use crate::iter::FusedIterator;
+
+// UTF-8 ranges and tags for encoding characters
+const TAG_CONT: u8 = 0b1000_0000; // 10xx_xxxx: continuation byte
+const TAG_TWO_B: u8 = 0b1100_0000; // 110x_xxxx: leading byte of a 2-byte sequence
+const TAG_THREE_B: u8 = 0b1110_0000; // 1110_xxxx: leading byte of a 3-byte sequence
+const TAG_FOUR_B: u8 = 0b1111_0000; // 1111_0xxx: leading byte of a 4-byte sequence
+const MAX_ONE_B: u32 = 0x80; // first code point needing 2 UTF-8 bytes
+const MAX_TWO_B: u32 = 0x800; // first code point needing 3 UTF-8 bytes
+const MAX_THREE_B: u32 = 0x10000; // first code point needing 4 UTF-8 bytes
+
+/*
+ Lu Uppercase_Letter an uppercase letter
+ Ll Lowercase_Letter a lowercase letter
+ Lt Titlecase_Letter a digraphic character, with first part uppercase
+ Lm Modifier_Letter a modifier letter
+ Lo Other_Letter other letters, including syllables and ideographs
+ Mn Nonspacing_Mark a nonspacing combining mark (zero advance width)
+ Mc Spacing_Mark a spacing combining mark (positive advance width)
+ Me Enclosing_Mark an enclosing combining mark
+ Nd Decimal_Number a decimal digit
+ Nl Letter_Number a letterlike numeric character
+ No Other_Number a numeric character of other type
+ Pc Connector_Punctuation a connecting punctuation mark, like a tie
+ Pd Dash_Punctuation a dash or hyphen punctuation mark
+ Ps Open_Punctuation an opening punctuation mark (of a pair)
+ Pe Close_Punctuation a closing punctuation mark (of a pair)
+ Pi Initial_Punctuation an initial quotation mark
+ Pf Final_Punctuation a final quotation mark
+ Po Other_Punctuation a punctuation mark of other type
+ Sm Math_Symbol a symbol of primarily mathematical use
+ Sc Currency_Symbol a currency sign
+ Sk Modifier_Symbol a non-letterlike modifier symbol
+ So Other_Symbol a symbol of other type
+ Zs Space_Separator a space character (of various non-zero widths)
+ Zl Line_Separator U+2028 LINE SEPARATOR only
+ Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only
+ Cc Control a C0 or C1 control code
+ Cf Format a format control character
+ Cs Surrogate a surrogate code point
+ Co Private_Use a private-use character
+ Cn Unassigned a reserved unassigned code point or a noncharacter
+*/
+
+/// The highest valid code point a `char` can have.
+///
+/// A [`char`] is a [Unicode Scalar Value], which means that it is a [Code
+/// Point], but only ones within a certain range. `MAX` is the highest valid
+/// code point that's a valid [Unicode Scalar Value].
+///
+/// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value
+/// [Code Point]: http://www.unicode.org/glossary/#code_point
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX: char = char::MAX; // re-export of the primitive type's associated constant
+
+/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a
+/// decoding error.
+///
+/// It can occur, for example, when giving ill-formed UTF-8 bytes to
+/// [`String::from_utf8_lossy`](../../std/string/struct.String.html#method.from_utf8_lossy).
+#[stable(feature = "decode_utf16", since = "1.9.0")]
+pub const REPLACEMENT_CHARACTER: char = char::REPLACEMENT_CHARACTER; // re-export of the primitive's associated constant
+
+/// Returns an iterator that yields the hexadecimal Unicode escape of a
+/// character, as `char`s.
+///
+/// This `struct` is created by the [`escape_unicode`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`escape_unicode`]: char::escape_unicode
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct EscapeUnicode {
+ // The character being escaped.
+ c: char,
+ // Which piece of the `\u{...}` escape is produced next.
+ state: EscapeUnicodeState,
+
+ // The index of the next hex digit to be printed (0 if none),
+ // i.e., the number of remaining hex digits to be printed;
+ // increasing from the least significant digit: 0x543210
+ hex_digit_idx: usize,
+}
+
+// The enum values are ordered so that their representation is the
+// same as the remaining length (besides the hexadecimal digits). This
+// likely makes `len()` a single load from memory, and worth inlining.
+#[derive(Clone, Debug)]
+enum EscapeUnicodeState {
+ Done, // nothing left to yield
+ RightBrace, // yield '}' next
+ Value, // yield the remaining hex digits, then '}'
+ LeftBrace, // yield '{' next
+ Type, // yield 'u' next
+ Backslash, // yield '\\' next
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeUnicode {
+ type Item = char;
+
+ fn next(&mut self) -> Option<char> {
+ match self.state {
+ EscapeUnicodeState::Done => None,
+ EscapeUnicodeState::Backslash => {
+ self.state = EscapeUnicodeState::Type;
+ Some('\\')
+ }
+ EscapeUnicodeState::Type => {
+ self.state = EscapeUnicodeState::LeftBrace;
+ Some('u')
+ }
+ EscapeUnicodeState::LeftBrace => {
+ self.state = EscapeUnicodeState::Value;
+ Some('{')
+ }
+ EscapeUnicodeState::Value => {
+ // Digits come out most-significant first, counting
+ // `hex_digit_idx` down to zero.
+ let nibble = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf;
+ if self.hex_digit_idx == 0 {
+ self.state = EscapeUnicodeState::RightBrace;
+ } else {
+ self.hex_digit_idx -= 1;
+ }
+ // A nibble is always a valid base-16 digit, so this is `Some`.
+ from_digit(nibble, 16)
+ }
+ EscapeUnicodeState::RightBrace => {
+ self.state = EscapeUnicodeState::Done;
+ Some('}')
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Exact: the remaining length is always known.
+ (self.len(), Some(self.len()))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn last(self) -> Option<char> {
+ // Every state other than `Done` ends by emitting the closing brace.
+ match self.state {
+ EscapeUnicodeState::Done => None,
+ _ => Some('}'),
+ }
+ }
+}
+
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeUnicode {
+ #[inline]
+ fn len(&self) -> usize {
+ // Each state's position in the enum equals the number of non-hex
+ // characters still pending, so this is effectively a load plus an add.
+ let pending = match self.state {
+ EscapeUnicodeState::Done => 0,
+ EscapeUnicodeState::RightBrace => 1,
+ EscapeUnicodeState::Value => 2,
+ EscapeUnicodeState::LeftBrace => 3,
+ EscapeUnicodeState::Type => 4,
+ EscapeUnicodeState::Backslash => 5,
+ };
+ pending + self.hex_digit_idx
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeUnicode {}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for EscapeUnicode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Drain a clone of the iterator, writing each escape character.
+ self.clone().try_for_each(|c| f.write_char(c))
+ }
+}
+
+/// An iterator that yields the literal escape code of a `char`.
+///
+/// This `struct` is created by the [`escape_default`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`escape_default`]: char::escape_default
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct EscapeDefault {
+ // What remains to be yielded; see `EscapeDefaultState`.
+ state: EscapeDefaultState,
+}
+
+#[derive(Clone, Debug)]
+enum EscapeDefaultState {
+ Done, // nothing left to yield
+ Char(char), // yield the char itself, unescaped
+ Backslash(char), // yield '\\', then the char
+ Unicode(EscapeUnicode), // delegate to a full `\u{...}` escape
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for EscapeDefault {
+ type Item = char;
+
+ fn next(&mut self) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Backslash(c) => {
+ // Emit the backslash now; the escaped char comes next.
+ self.state = EscapeDefaultState::Char(c);
+ Some('\\')
+ }
+ EscapeDefaultState::Char(c) => {
+ self.state = EscapeDefaultState::Done;
+ Some(c)
+ }
+ EscapeDefaultState::Done => None,
+ EscapeDefaultState::Unicode(ref mut iter) => iter.next(),
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Exact: the remaining length is always known.
+ let n = self.len();
+ (n, Some(n))
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ fn nth(&mut self, n: usize) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Backslash(c) if n == 0 => {
+ self.state = EscapeDefaultState::Char(c);
+ Some('\\')
+ }
+ EscapeDefaultState::Backslash(c) if n == 1 => {
+ self.state = EscapeDefaultState::Done;
+ Some(c)
+ }
+ EscapeDefaultState::Backslash(_) => {
+ // n >= 2: skipping past both remaining chars exhausts the iterator.
+ self.state = EscapeDefaultState::Done;
+ None
+ }
+ EscapeDefaultState::Char(c) => {
+ // Only one char remains; any n > 0 overshoots the end.
+ self.state = EscapeDefaultState::Done;
+
+ if n == 0 { Some(c) } else { None }
+ }
+ EscapeDefaultState::Done => None,
+ EscapeDefaultState::Unicode(ref mut i) => i.nth(n),
+ }
+ }
+
+ fn last(self) -> Option<char> {
+ match self.state {
+ EscapeDefaultState::Unicode(iter) => iter.last(),
+ EscapeDefaultState::Done => None,
+ // Whether escaped or not, the char itself is always yielded last.
+ EscapeDefaultState::Backslash(c) | EscapeDefaultState::Char(c) => Some(c),
+ }
+ }
+}
+
+#[stable(feature = "exact_size_escape", since = "1.11.0")]
+impl ExactSizeIterator for EscapeDefault {
+ fn len(&self) -> usize {
+ // Number of characters still to be yielded in each state.
+ match &self.state {
+ EscapeDefaultState::Done => 0,
+ EscapeDefaultState::Char(_) => 1,
+ EscapeDefaultState::Backslash(_) => 2,
+ EscapeDefaultState::Unicode(iter) => iter.len(),
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeDefault {}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for EscapeDefault {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Drain a clone of the iterator, writing each escape character.
+ self.clone().try_for_each(|c| f.write_char(c))
+ }
+}
+
+/// An iterator that yields the literal escape code of a `char`.
+///
+/// This `struct` is created by the [`escape_debug`] method on [`char`]. See its
+/// documentation for more.
+///
+/// [`escape_debug`]: char::escape_debug
+// Thin newtype: debug escaping currently reuses the `EscapeDefault` machinery.
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+#[derive(Clone, Debug)]
+pub struct EscapeDebug(EscapeDefault);
+
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl Iterator for EscapeDebug {
+ type Item = char;
+ // Both methods simply delegate to the wrapped `EscapeDefault`.
+ fn next(&mut self) -> Option<char> {
+ let EscapeDebug(inner) = self;
+ inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let EscapeDebug(inner) = self;
+ inner.size_hint()
+ }
+}
+
+// `len` comes from the default impl, backed by the exact `size_hint` above.
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl ExactSizeIterator for EscapeDebug {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EscapeDebug {}
+
+#[stable(feature = "char_escape_debug", since = "1.20.0")]
+impl fmt::Display for EscapeDebug {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward to the wrapped `EscapeDefault`, preserving `f`'s flags.
+ <EscapeDefault as fmt::Display>::fmt(&self.0, f)
+ }
+}
+
+/// Returns an iterator that yields the lowercase equivalent of a `char`.
+///
+/// This `struct` is created by the [`to_lowercase`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`to_lowercase`]: char::to_lowercase
+// Newtype over `CaseMappingIter`, which yields up to three mapped chars.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Clone)]
+pub struct ToLowercase(CaseMappingIter);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for ToLowercase {
+ type Item = char;
+ // Both methods simply delegate to the inner `CaseMappingIter`.
+ fn next(&mut self) -> Option<char> {
+ let ToLowercase(inner) = self;
+ inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let ToLowercase(inner) = self;
+ inner.size_hint()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for ToLowercase {}
+
+// `len` comes from the default impl, backed by the exact `size_hint` above.
+#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")]
+impl ExactSizeIterator for ToLowercase {}
+
+/// Returns an iterator that yields the uppercase equivalent of a `char`.
+///
+/// This `struct` is created by the [`to_uppercase`] method on [`char`]. See
+/// its documentation for more.
+///
+/// [`to_uppercase`]: char::to_uppercase
+// Newtype over `CaseMappingIter`, which yields up to three mapped chars.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug, Clone)]
+pub struct ToUppercase(CaseMappingIter);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Iterator for ToUppercase {
+ type Item = char;
+ // Both methods simply delegate to the inner `CaseMappingIter`.
+ fn next(&mut self) -> Option<char> {
+ let ToUppercase(inner) = self;
+ inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let ToUppercase(inner) = self;
+ inner.size_hint()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for ToUppercase {}
+
+// `len` comes from the default impl, backed by the exact `size_hint` above.
+#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")]
+impl ExactSizeIterator for ToUppercase {}
+
+// The characters of a case mapping that remain to be yielded.
+#[derive(Debug, Clone)]
+enum CaseMappingIter {
+ Three(char, char, char),
+ Two(char, char),
+ One(char),
+ Zero,
+}
+
+impl CaseMappingIter {
+ fn new(chars: [char; 3]) -> CaseMappingIter {
+ // NUL padding marks unused trailing slots; match the most-padded
+ // shapes first.
+ match chars {
+ // Also reached when chars[0] == '\0'.
+ [a, '\0', '\0'] => CaseMappingIter::One(a),
+ [a, b, '\0'] => CaseMappingIter::Two(a, b),
+ [a, b, c] => CaseMappingIter::Three(a, b, c),
+ }
+ }
+}
+
+impl Iterator for CaseMappingIter {
+ type Item = char;
+ fn next(&mut self) -> Option<char> {
+ // Compute the successor state and the yielded char in one step.
+ let (next_state, out) = match *self {
+ CaseMappingIter::Three(x, y, z) => (CaseMappingIter::Two(y, z), Some(x)),
+ CaseMappingIter::Two(y, z) => (CaseMappingIter::One(z), Some(y)),
+ CaseMappingIter::One(z) => (CaseMappingIter::Zero, Some(z)),
+ CaseMappingIter::Zero => (CaseMappingIter::Zero, None),
+ };
+ *self = next_state;
+ out
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Exact: the variant encodes the remaining count.
+ let remaining = match self {
+ CaseMappingIter::Three(..) => 3,
+ CaseMappingIter::Two(..) => 2,
+ CaseMappingIter::One(_) => 1,
+ CaseMappingIter::Zero => 0,
+ };
+ (remaining, Some(remaining))
+ }
+}
+
+impl fmt::Display for CaseMappingIter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Write the remaining chars in order, without consuming `self`.
+ self.clone().try_for_each(|c| f.write_char(c))
+ }
+}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for ToLowercase {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward to the inner `CaseMappingIter`, preserving `f`'s flags.
+ <CaseMappingIter as fmt::Display>::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "char_struct_display", since = "1.16.0")]
+impl fmt::Display for ToUppercase {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward to the inner `CaseMappingIter`, preserving `f`'s flags.
+ <CaseMappingIter as fmt::Display>::fmt(&self.0, f)
+ }
+}
--- /dev/null
+//! The `Clone` trait for types that cannot be 'implicitly copied'.
+//!
+//! In Rust, some simple types are "implicitly copyable" and when you
+//! assign them or pass them as arguments, the receiver will get a copy,
+//! leaving the original value in place. These types do not require
+//! allocation to copy and do not have finalizers (i.e., they do not
+//! contain owned boxes or implement [`Drop`]), so the compiler considers
+//! them cheap and safe to copy. For other types copies must be made
+//! explicitly, by convention implementing the [`Clone`] trait and calling
+//! the [`clone`] method.
+//!
+//! [`clone`]: Clone::clone
+//!
+//! Basic usage example:
+//!
+//! ```
+//! let s = String::new(); // String type implements Clone
+//! let copy = s.clone(); // so we can clone it
+//! ```
+//!
+//! To easily implement the Clone trait, you can also use
+//! `#[derive(Clone)]`. Example:
+//!
+//! ```
+//! #[derive(Clone)] // we add the Clone trait to Morpheus struct
+//! struct Morpheus {
+//! blue_pill: f32,
+//! red_pill: i64,
+//! }
+//!
+//! fn main() {
+//! let f = Morpheus { blue_pill: 0.0, red_pill: 0 };
+//! let copy = f.clone(); // and now we can clone it!
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+/// A common trait for the ability to explicitly duplicate an object.
+///
+/// Differs from [`Copy`] in that [`Copy`] is implicit and extremely inexpensive, while
+/// `Clone` is always explicit and may or may not be expensive. In order to enforce
+/// these characteristics, Rust does not allow you to reimplement [`Copy`], but you
+/// may reimplement `Clone` and run arbitrary code.
+///
+/// Since `Clone` is more general than [`Copy`], you can automatically make anything
+/// [`Copy`] be `Clone` as well.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d
+/// implementation of [`Clone`] calls [`clone`] on each field.
+///
+/// [`clone`]: Clone::clone
+///
+/// For a generic struct, `#[derive]` implements `Clone` conditionally by adding bound `Clone` on
+/// generic parameters.
+///
+/// ```
+/// // `derive` implements Clone for Reading<T> when T is Clone.
+/// #[derive(Clone)]
+/// struct Reading<T> {
+/// frequency: T,
+/// }
+/// ```
+///
+/// ## How can I implement `Clone`?
+///
+/// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally:
+/// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`.
+/// Manual implementations should be careful to uphold this invariant; however, unsafe code
+/// must not rely on it to ensure memory safety.
+///
+/// An example is a generic struct holding a function pointer. In this case, the
+/// implementation of `Clone` cannot be `derive`d, but can be implemented as:
+///
+/// ```
+/// struct Generate<T>(fn() -> T);
+///
+/// impl<T> Copy for Generate<T> {}
+///
+/// impl<T> Clone for Generate<T> {
+/// fn clone(&self) -> Self {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// ## Additional implementors
+///
+/// In addition to the [implementors listed below][impls],
+/// the following types also implement `Clone`:
+///
+/// * Function item types (i.e., the distinct types defined for each function)
+/// * Function pointer types (e.g., `fn() -> i32`)
+/// * Array types, for all sizes, if the item type also implements `Clone` (e.g., `[i32; 123456]`)
+/// * Tuple types, if each component also implements `Clone` (e.g., `()`, `(i32, bool)`)
+/// * Closure types, if they capture no value from the environment
+/// or if all such captured values implement `Clone` themselves.
+/// Note that variables captured by shared reference always implement `Clone`
+/// (even if the referent doesn't),
+/// while variables captured by mutable reference never implement `Clone`.
+///
+/// [impls]: #implementors
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "clone"]
+pub trait Clone: Sized {
+ /// Returns a copy of the value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let hello = "Hello"; // &str implements Clone
+ ///
+ /// assert_eq!("Hello", hello.clone());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "cloning is often expensive and is not expected to have side effects"]
+ fn clone(&self) -> Self;
+
+ /// Performs copy-assignment from `source`.
+ ///
+ /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
+ /// but can be overridden to reuse the resources of `a` to avoid unnecessary
+ /// allocations.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn clone_from(&mut self, source: &Self) {
+ // Default: plain clone-and-assign; overrides may reuse `self`'s storage.
+ *self = source.clone()
+ }
+}
+
+/// Derive macro generating an impl of the trait `Clone`.
+// The expansion is produced by the compiler itself (`rustc_builtin_macro`).
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, derive_clone_copy)]
+pub macro Clone($item:item) {
+ /* compiler built-in */
+}
+
+// FIXME(aburka): these structs are used solely by #[derive] to
+// assert that every component of a type implements Clone or Copy.
+//
+// These structs should never appear in user code.
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+#[unstable(
+ feature = "derive_clone_copy",
+ reason = "deriving hack, should not be public",
+ issue = "none"
+)]
+// Never constructed; only the `T: Clone` bound on the parameter matters.
+pub struct AssertParamIsClone<T: Clone + ?Sized> {
+ _field: crate::marker::PhantomData<T>,
+}
+#[doc(hidden)]
+#[allow(missing_debug_implementations)]
+#[unstable(
+ feature = "derive_clone_copy",
+ reason = "deriving hack, should not be public",
+ issue = "none"
+)]
+// Never constructed; only the `T: Copy` bound on the parameter matters.
+pub struct AssertParamIsCopy<T: Copy + ?Sized> {
+ _field: crate::marker::PhantomData<T>,
+}
+
+/// Implementations of `Clone` for primitive types.
+///
+/// Implementations that cannot be described in Rust
+/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
+/// in `rustc_trait_selection`.
+mod impls {
+
+ use super::Clone;
+
+ // Expands to a bitwise-copy (`*self`) Clone impl for each listed type.
+ macro_rules! impl_clone {
+ ($($t:ty)*) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Clone for $t {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+ )*
+ }
+ }
+
+ impl_clone! {
+ usize u8 u16 u32 u64 u128
+ isize i8 i16 i32 i64 i128
+ f32 f64
+ bool char
+ }
+
+ // `!` has no values, so this body can never actually run.
+ #[unstable(feature = "never_type", issue = "35121")]
+ impl Clone for ! {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ // Raw pointers are plain data: cloning copies the address, not the pointee.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Clone for *const T {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Clone for *mut T {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ /// Shared references can be cloned, but mutable references *cannot*!
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> Clone for &T {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+ }
+
+ /// Shared references can be cloned, but mutable references *cannot*!
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized> !Clone for &mut T {}
+}
--- /dev/null
+//! Functionality for ordering and comparison.
+//!
+//! This module contains various tools for ordering and comparing values. In
+//! summary:
+//!
+//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and
+//! partial equality between values, respectively. Implementing them overloads
+//! the `==` and `!=` operators.
+//! * [`Ord`] and [`PartialOrd`] are traits that allow you to define total and
+//! partial orderings between values, respectively. Implementing them overloads
+//! the `<`, `<=`, `>`, and `>=` operators.
+//! * [`Ordering`] is an enum returned by the main functions of [`Ord`] and
+//! [`PartialOrd`], and describes an ordering.
+//! * [`Reverse`] is a struct that allows you to easily reverse an ordering.
+//! * [`max`] and [`min`] are functions that build off of [`Ord`] and allow you
+//! to find the maximum or minimum of two values.
+//!
+//! For more details, see the respective documentation of each item in the list.
+//!
+//! [`max`]: Ord::max
+//! [`min`]: Ord::min
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use self::Ordering::*;
+
+/// Trait for equality comparisons which are [partial equivalence
+/// relations](https://en.wikipedia.org/wiki/Partial_equivalence_relation).
+///
+/// This trait allows for partial equality, for types that do not have a full
+/// equivalence relation. For example, in floating point numbers `NaN != NaN`,
+/// so floating point types implement `PartialEq` but not [`Eq`].
+///
+/// Formally, the equality must be (for all `a`, `b` and `c`):
+///
+/// - symmetric: `a == b` implies `b == a`; and
+/// - transitive: `a == b` and `b == c` implies `a == c`.
+///
+/// Note that these requirements mean that the trait itself must be implemented
+/// symmetrically and transitively: if `T: PartialEq<U>` and `U: PartialEq<V>`
+/// then `U: PartialEq<T>` and `T: PartialEq<V>`.
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]`. When `derive`d on structs, two
+/// instances are equal if all fields are equal, and not equal if any fields
+/// are not equal. When `derive`d on enums, each variant is equal to itself
+/// and not equal to the other variants.
+///
+/// ## How can I implement `PartialEq`?
+///
+/// `PartialEq` only requires the [`eq`] method to be implemented; [`ne`] is defined
+/// in terms of it by default. Any manual implementation of [`ne`] *must* respect
+/// the rule that [`eq`] is a strict inverse of [`ne`]; that is, `!(a == b)` if and
+/// only if `a != b`.
+///
+/// Implementations of `PartialEq`, [`PartialOrd`], and [`Ord`] *must* agree with
+/// each other. It's easy to accidentally make them disagree by deriving some
+/// of the traits and manually implementing others.
+///
+/// An example implementation for a domain in which two books are considered
+/// the same book if their ISBN matches, even if the formats differ:
+///
+/// ```
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// impl PartialEq for Book {
+/// fn eq(&self, other: &Self) -> bool {
+/// self.isbn == other.isbn
+/// }
+/// }
+///
+/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
+/// let b2 = Book { isbn: 3, format: BookFormat::Ebook };
+/// let b3 = Book { isbn: 10, format: BookFormat::Paperback };
+///
+/// assert!(b1 == b2);
+/// assert!(b1 != b3);
+/// ```
+///
+/// ## How can I compare two different types?
+///
+/// The type you can compare with is controlled by `PartialEq`'s type parameter.
+/// For example, let's tweak our previous code a bit:
+///
+/// ```
+/// // The derive implements <BookFormat> == <BookFormat> comparisons
+/// #[derive(PartialEq)]
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// // Implement <Book> == <BookFormat> comparisons
+/// impl PartialEq<BookFormat> for Book {
+/// fn eq(&self, other: &BookFormat) -> bool {
+/// self.format == *other
+/// }
+/// }
+///
+/// // Implement <BookFormat> == <Book> comparisons
+/// impl PartialEq<Book> for BookFormat {
+/// fn eq(&self, other: &Book) -> bool {
+/// *self == other.format
+/// }
+/// }
+///
+/// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
+///
+/// assert!(b1 == BookFormat::Paperback);
+/// assert!(BookFormat::Ebook != b1);
+/// ```
+///
+/// By changing `impl PartialEq for Book` to `impl PartialEq<BookFormat> for Book`,
+/// we allow `BookFormat`s to be compared with `Book`s.
+///
+/// A comparison like the one above, which ignores some fields of the struct,
+/// can be dangerous. It can easily lead to an unintended violation of the
+/// requirements for a partial equivalence relation. For example, if we kept
+/// the above implementation of `PartialEq<Book>` for `BookFormat` and added an
+/// implementation of `PartialEq<Book>` for `Book` (either via a `#[derive]` or
+/// via the manual implementation from the first example) then the result would
+/// violate transitivity:
+///
+/// ```should_panic
+/// #[derive(PartialEq)]
+/// enum BookFormat {
+/// Paperback,
+/// Hardback,
+/// Ebook,
+/// }
+///
+/// #[derive(PartialEq)]
+/// struct Book {
+/// isbn: i32,
+/// format: BookFormat,
+/// }
+///
+/// impl PartialEq<BookFormat> for Book {
+/// fn eq(&self, other: &BookFormat) -> bool {
+/// self.format == *other
+/// }
+/// }
+///
+/// impl PartialEq<Book> for BookFormat {
+/// fn eq(&self, other: &Book) -> bool {
+/// *self == other.format
+/// }
+/// }
+///
+/// fn main() {
+/// let b1 = Book { isbn: 1, format: BookFormat::Paperback };
+/// let b2 = Book { isbn: 2, format: BookFormat::Paperback };
+///
+/// assert!(b1 == BookFormat::Paperback);
+/// assert!(BookFormat::Paperback == b2);
+///
+/// // The following should hold by transitivity but doesn't.
+/// assert!(b1 == b2); // <-- PANICS
+/// }
+/// ```
+///
+/// # Examples
+///
+/// ```
+/// let x: u32 = 0;
+/// let y: u32 = 1;
+///
+/// assert_eq!(x == y, false);
+/// assert_eq!(x.eq(&y), false);
+/// ```
+///
+/// [`eq`]: PartialEq::eq
+/// [`ne`]: PartialEq::ne
+#[lang = "eq"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "==")]
+#[doc(alias = "!=")]
+#[rustc_on_unimplemented(
+ message = "can't compare `{Self}` with `{Rhs}`",
+ label = "no implementation for `{Self} == {Rhs}`"
+)]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ /// This method tests for `self` and `other` values to be equal, and is used
+ /// by `==`.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn eq(&self, other: &Rhs) -> bool;
+
+ /// This method tests for `!=`.
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn ne(&self, other: &Rhs) -> bool {
+ // Default: the strict inverse of `eq`; overrides must preserve that.
+ !self.eq(other)
+ }
+}
+
+/// Derive macro generating an impl of the trait `PartialEq`.
+// The expansion is produced by the compiler itself (`rustc_builtin_macro`).
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics, structural_match)]
+pub macro PartialEq($item:item) {
+ /* compiler built-in */
+}
+
/// Trait for equality comparisons which are [equivalence relations](
/// https://en.wikipedia.org/wiki/Equivalence_relation).
///
/// This means that, in addition to `a == b` and `a != b` being strict inverses, the equality must
/// be (for all `a`, `b` and `c`):
///
/// - reflexive: `a == a`;
/// - symmetric: `a == b` implies `b == a`; and
/// - transitive: `a == b` and `b == c` implies `a == c`.
///
/// This property cannot be checked by the compiler, and therefore `Eq` implies
/// [`PartialEq`], and has no extra methods.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has
/// no extra methods, it is only informing the compiler that this is an
/// equivalence relation rather than a partial equivalence relation. Note that
/// the `derive` strategy requires all fields are `Eq`, which isn't
/// always desired.
///
/// ## How can I implement `Eq`?
///
/// If you cannot use the `derive` strategy, specify that your type implements
/// `Eq`, which has no methods:
///
/// ```
/// enum BookFormat { Paperback, Hardback, Ebook }
/// struct Book {
///     isbn: i32,
///     format: BookFormat,
/// }
/// impl PartialEq for Book {
///     fn eq(&self, other: &Self) -> bool {
///         self.isbn == other.isbn
///     }
/// }
/// impl Eq for Book {}
/// ```
#[doc(alias = "==")]
#[doc(alias = "!=")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Eq: PartialEq<Self> {
    // this method is used solely by #[deriving] to assert
    // that every component of a type implements #[deriving]
    // itself, the current deriving infrastructure means doing this
    // assertion without using a method on this trait is nearly
    // impossible.
    //
    // This should never be implemented by hand.
    #[doc(hidden)]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn assert_receiver_is_total_eq(&self) {}
}
+
/// Derive macro generating an impl of the trait `Eq`.
///
/// The expansion is provided by the compiler itself (`rustc_builtin_macro`).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match)]
pub macro Eq($item:item) {
    /* compiler built-in */
}
+
// FIXME: this struct is used solely by #[derive] to
// assert that every component of a type implements Eq.
//
// This struct should never appear in user code.
#[doc(hidden)]
#[allow(missing_debug_implementations)]
#[unstable(feature = "derive_eq", reason = "deriving hack, should not be public", issue = "none")]
pub struct AssertParamIsEq<T: Eq + ?Sized> {
    // `PhantomData` imposes the `T: Eq` bound without storing a `T`.
    _field: crate::marker::PhantomData<T>,
}
+
/// An `Ordering` is the result of a comparison between two values.
///
/// The three variants carry explicit discriminants (`-1`, `0`, `1`) in
/// increasing order, so casting an `Ordering` to an integer preserves the
/// ordering of the variants.
///
/// # Examples
///
/// ```
/// use std::cmp::Ordering;
///
/// let result = 1.cmp(&2);
/// assert_eq!(Ordering::Less, result);
///
/// let result = 1.cmp(&1);
/// assert_eq!(Ordering::Equal, result);
///
/// let result = 2.cmp(&1);
/// assert_eq!(Ordering::Greater, result);
/// ```
#[derive(Clone, Copy, PartialEq, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Ordering {
    /// An ordering where a compared value is less than another.
    #[stable(feature = "rust1", since = "1.0.0")]
    Less = -1,
    /// An ordering where a compared value is equal to another.
    #[stable(feature = "rust1", since = "1.0.0")]
    Equal = 0,
    /// An ordering where a compared value is greater than another.
    #[stable(feature = "rust1", since = "1.0.0")]
    Greater = 1,
}
+
impl Ordering {
    /// Reverses the `Ordering`.
    ///
    /// * `Less` becomes `Greater`.
    /// * `Greater` becomes `Less`.
    /// * `Equal` becomes `Equal`.
    ///
    /// # Examples
    ///
    /// Basic behavior:
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// assert_eq!(Ordering::Less.reverse(), Ordering::Greater);
    /// assert_eq!(Ordering::Equal.reverse(), Ordering::Equal);
    /// assert_eq!(Ordering::Greater.reverse(), Ordering::Less);
    /// ```
    ///
    /// This method can be used to reverse a comparison:
    ///
    /// ```
    /// let data: &mut [_] = &mut [2, 10, 5, 8];
    ///
    /// // sort the array from largest to smallest.
    /// data.sort_by(|a, b| a.cmp(b).reverse());
    ///
    /// let b: &mut [_] = &mut [10, 8, 5, 2];
    /// assert!(data == b);
    /// ```
    #[inline]
    #[must_use]
    #[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn reverse(self) -> Ordering {
        // Swap the two strict orderings; `Equal` is its own reverse.
        match self {
            Less => Greater,
            Equal => Equal,
            Greater => Less,
        }
    }

    /// Chains two orderings.
    ///
    /// Returns `self` when it's not `Equal`. Otherwise returns `other`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// let result = Ordering::Equal.then(Ordering::Less);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Less.then(Ordering::Equal);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Less.then(Ordering::Greater);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Equal.then(Ordering::Equal);
    /// assert_eq!(result, Ordering::Equal);
    ///
    /// let x: (i64, i64, i64) = (1, 2, 7);
    /// let y: (i64, i64, i64) = (1, 5, 3);
    /// let result = x.0.cmp(&y.0).then(x.1.cmp(&y.1)).then(x.2.cmp(&y.2));
    ///
    /// assert_eq!(result, Ordering::Less);
    /// ```
    #[inline]
    #[must_use]
    #[rustc_const_stable(feature = "const_ordering", since = "1.48.0")]
    #[stable(feature = "ordering_chaining", since = "1.17.0")]
    pub const fn then(self, other: Ordering) -> Ordering {
        // Any decided (non-`Equal`) ordering short-circuits the chain.
        match self {
            Equal => other,
            _ => self,
        }
    }

    /// Chains the ordering with the given function.
    ///
    /// Returns `self` when it's not `Equal`. Otherwise calls `f` and returns
    /// the result.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// let result = Ordering::Equal.then_with(|| Ordering::Less);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Less.then_with(|| Ordering::Equal);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Less.then_with(|| Ordering::Greater);
    /// assert_eq!(result, Ordering::Less);
    ///
    /// let result = Ordering::Equal.then_with(|| Ordering::Equal);
    /// assert_eq!(result, Ordering::Equal);
    ///
    /// let x: (i64, i64, i64) = (1, 2, 7);
    /// let y: (i64, i64, i64) = (1, 5, 3);
    /// let result = x.0.cmp(&y.0).then_with(|| x.1.cmp(&y.1)).then_with(|| x.2.cmp(&y.2));
    ///
    /// assert_eq!(result, Ordering::Less);
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "ordering_chaining", since = "1.17.0")]
    pub fn then_with<F: FnOnce() -> Ordering>(self, f: F) -> Ordering {
        // `f` is only invoked when `self` is `Equal` (lazy tiebreaker).
        match self {
            Equal => f(),
            _ => self,
        }
    }
}
+
/// A helper struct for reverse ordering.
///
/// This struct is a helper to be used with functions like [`Vec::sort_by_key`] and
/// can be used to reverse order a part of a key.
///
/// [`Vec::sort_by_key`]: ../../std/vec/struct.Vec.html#method.sort_by_key
///
/// # Examples
///
/// ```
/// use std::cmp::Reverse;
///
/// let mut v = vec![1, 2, 3, 4, 5, 6];
/// v.sort_by_key(|&num| (num > 3, Reverse(num)));
/// assert_eq!(v, vec![3, 2, 1, 6, 5, 4]);
/// ```
// Equality is unaffected by the wrapper, so `PartialEq`/`Eq`/`Hash` can simply
// be derived; only the ordering impls below are written by hand.
#[derive(PartialEq, Eq, Debug, Copy, Clone, Default, Hash)]
#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
pub struct Reverse<T>(#[stable(feature = "reverse_cmp_key", since = "1.19.0")] pub T);
+
#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
impl<T: PartialOrd> PartialOrd for Reverse<T> {
    // Every method delegates to the inner `T` with the operands swapped.
    // Note that `lt`/`le`/`gt`/`ge` forward to the inner type's corresponding
    // methods (rather than using the `PartialOrd` defaults built on
    // `partial_cmp`), so `Reverse<T>` reflects exactly how `T` implements
    // each individual operator.
    #[inline]
    fn partial_cmp(&self, other: &Reverse<T>) -> Option<Ordering> {
        other.0.partial_cmp(&self.0)
    }

    #[inline]
    fn lt(&self, other: &Self) -> bool {
        other.0 < self.0
    }
    #[inline]
    fn le(&self, other: &Self) -> bool {
        other.0 <= self.0
    }
    #[inline]
    fn gt(&self, other: &Self) -> bool {
        other.0 > self.0
    }
    #[inline]
    fn ge(&self, other: &Self) -> bool {
        other.0 >= self.0
    }
}
+
+#[stable(feature = "reverse_cmp_key", since = "1.19.0")]
+impl<T: Ord> Ord for Reverse<T> {
+ #[inline]
+ fn cmp(&self, other: &Reverse<T>) -> Ordering {
+ other.0.cmp(&self.0)
+ }
+}
+
/// Trait for types that form a [total order](https://en.wikipedia.org/wiki/Total_order).
///
/// An order is a total order if it is (for all `a`, `b` and `c`):
///
/// - total and asymmetric: exactly one of `a < b`, `a == b` or `a > b` is true; and
/// - transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, it will produce a
/// [lexicographic](https://en.wikipedia.org/wiki/Lexicographic_order) ordering based on the top-to-bottom declaration order of the struct's members.
/// When `derive`d on enums, variants are ordered by their top-to-bottom discriminant order.
///
/// ## Lexicographical comparison
///
/// Lexicographical comparison is an operation with the following properties:
/// - Two sequences are compared element by element.
/// - The first mismatching element defines which sequence is lexicographically less or greater than the other.
/// - If one sequence is a prefix of another, the shorter sequence is lexicographically less than the other.
/// - If two sequences have equivalent elements and are of the same length, then the sequences are lexicographically equal.
/// - An empty sequence is lexicographically less than any non-empty sequence.
/// - Two empty sequences are lexicographically equal.
///
/// ## How can I implement `Ord`?
///
/// `Ord` requires that the type also be [`PartialOrd`] and [`Eq`] (which requires [`PartialEq`]).
///
/// Then you must define an implementation for [`cmp`]. You may find it useful to use
/// [`cmp`] on your type's fields.
///
/// Implementations of [`PartialEq`], [`PartialOrd`], and `Ord` *must*
/// agree with each other. That is, `a.cmp(b) == Ordering::Equal` if
/// and only if `a == b` and `Some(a.cmp(b)) == a.partial_cmp(b)` for
/// all `a` and `b`. It's easy to accidentally make them disagree by
/// deriving some of the traits and manually implementing others.
///
/// Here's an example where you want to sort people by height only, disregarding `id`
/// and `name`:
///
/// ```
/// use std::cmp::Ordering;
///
/// #[derive(Eq)]
/// struct Person {
///     id: u32,
///     name: String,
///     height: u32,
/// }
///
/// impl Ord for Person {
///     fn cmp(&self, other: &Self) -> Ordering {
///         self.height.cmp(&other.height)
///     }
/// }
///
/// impl PartialOrd for Person {
///     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
///         Some(self.cmp(other))
///     }
/// }
///
/// impl PartialEq for Person {
///     fn eq(&self, other: &Self) -> bool {
///         self.height == other.height
///     }
/// }
/// ```
///
/// [`cmp`]: Ord::cmp
#[doc(alias = "<")]
#[doc(alias = ">")]
#[doc(alias = "<=")]
#[doc(alias = ">=")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Ord: Eq + PartialOrd<Self> {
    /// This method returns an [`Ordering`] between `self` and `other`.
    ///
    /// By convention, `self.cmp(&other)` returns the ordering matching the expression
    /// `self <operator> other` if true.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// assert_eq!(5.cmp(&10), Ordering::Less);
    /// assert_eq!(10.cmp(&5), Ordering::Greater);
    /// assert_eq!(5.cmp(&5), Ordering::Equal);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn cmp(&self, other: &Self) -> Ordering;

    /// Compares and returns the maximum of two values.
    ///
    /// Returns the second argument if the comparison determines them to be equal.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(2, 1.max(2));
    /// assert_eq!(2, 2.max(2));
    /// ```
    #[stable(feature = "ord_max_min", since = "1.21.0")]
    #[inline]
    #[must_use]
    fn max(self, other: Self) -> Self
    where
        Self: Sized,
    {
        max_by(self, other, Ord::cmp)
    }

    /// Compares and returns the minimum of two values.
    ///
    /// Returns the first argument if the comparison determines them to be equal.
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(1, 1.min(2));
    /// assert_eq!(2, 2.min(2));
    /// ```
    #[stable(feature = "ord_max_min", since = "1.21.0")]
    #[inline]
    #[must_use]
    fn min(self, other: Self) -> Self
    where
        Self: Sized,
    {
        min_by(self, other, Ord::cmp)
    }

    /// Restrict a value to a certain interval.
    ///
    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
    /// less than `min`. Otherwise this returns `self`.
    ///
    /// # Panics
    ///
    /// Panics if `min > max`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(clamp)]
    ///
    /// assert!((-3).clamp(-2, 1) == -2);
    /// assert!(0.clamp(-2, 1) == 0);
    /// assert!(2.clamp(-2, 1) == 1);
    /// ```
    #[must_use]
    #[unstable(feature = "clamp", issue = "44095")]
    fn clamp(self, min: Self, max: Self) -> Self
    where
        Self: Sized,
    {
        // An inverted interval is a caller bug; fail fast rather than return
        // an arbitrary bound.
        assert!(min <= max);
        if self < min {
            min
        } else if self > max {
            max
        } else {
            self
        }
    }
}
+
/// Derive macro generating an impl of the trait `Ord`.
///
/// The expansion is provided by the compiler itself (`rustc_builtin_macro`).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
pub macro Ord($item:item) {
    /* compiler built-in */
}
+
// `PartialEq` for `Ordering` is derived on the enum definition; this marker
// impl promotes it to a full equivalence relation.
#[stable(feature = "rust1", since = "1.0.0")]
impl Eq for Ordering {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Ord for Ordering {
+ #[inline]
+ fn cmp(&self, other: &Ordering) -> Ordering {
+ (*self as i32).cmp(&(*other as i32))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialOrd for Ordering {
+ #[inline]
+ fn partial_cmp(&self, other: &Ordering) -> Option<Ordering> {
+ (*self as i32).partial_cmp(&(*other as i32))
+ }
+}
+
/// Trait for values that can be compared for a sort-order.
///
/// The comparison must satisfy, for all `a`, `b` and `c`:
///
/// - asymmetry: if `a < b` then `!(a > b)`, as well as `a > b` implying `!(a < b)`; and
/// - transitivity: `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
///
/// Note that these requirements mean that the trait itself must be implemented symmetrically and
/// transitively: if `T: PartialOrd<U>` and `U: PartialOrd<V>` then `U: PartialOrd<T>` and `T:
/// PartialOrd<V>`.
///
/// ## Derivable
///
/// This trait can be used with `#[derive]`. When `derive`d on structs, it will produce a
/// lexicographic ordering based on the top-to-bottom declaration order of the struct's members.
/// When `derive`d on enums, variants are ordered by their top-to-bottom discriminant order.
///
/// ## How can I implement `PartialOrd`?
///
/// `PartialOrd` only requires implementation of the [`partial_cmp`] method, with the others
/// generated from default implementations.
///
/// However it remains possible to implement the others separately for types which do not have a
/// total order. For example, for floating point numbers, `NaN < 0 == false` and `NaN >= 0 ==
/// false` (cf. IEEE 754-2008 section 5.11).
///
/// `PartialOrd` requires your type to be [`PartialEq`].
///
/// Implementations of [`PartialEq`], `PartialOrd`, and [`Ord`] *must* agree with each other. It's
/// easy to accidentally make them disagree by deriving some of the traits and manually
/// implementing others.
///
/// If your type is [`Ord`], you can implement [`partial_cmp`] by using [`cmp`]:
///
/// ```
/// use std::cmp::Ordering;
///
/// #[derive(Eq)]
/// struct Person {
///     id: u32,
///     name: String,
///     height: u32,
/// }
///
/// impl PartialOrd for Person {
///     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
///         Some(self.cmp(other))
///     }
/// }
///
/// impl Ord for Person {
///     fn cmp(&self, other: &Self) -> Ordering {
///         self.height.cmp(&other.height)
///     }
/// }
///
/// impl PartialEq for Person {
///     fn eq(&self, other: &Self) -> bool {
///         self.height == other.height
///     }
/// }
/// ```
///
/// You may also find it useful to use [`partial_cmp`] on your type's fields. Here
/// is an example of `Person` types who have a floating-point `height` field that
/// is the only field to be used for sorting:
///
/// ```
/// use std::cmp::Ordering;
///
/// struct Person {
///     id: u32,
///     name: String,
///     height: f64,
/// }
///
/// impl PartialOrd for Person {
///     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
///         self.height.partial_cmp(&other.height)
///     }
/// }
///
/// impl PartialEq for Person {
///     fn eq(&self, other: &Self) -> bool {
///         self.height == other.height
///     }
/// }
/// ```
///
/// # Examples
///
/// ```
/// let x : u32 = 0;
/// let y : u32 = 1;
///
/// assert_eq!(x < y, true);
/// assert_eq!(x.lt(&y), true);
/// ```
///
/// [`partial_cmp`]: PartialOrd::partial_cmp
/// [`cmp`]: Ord::cmp
#[lang = "partial_ord"]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = ">")]
#[doc(alias = "<")]
#[doc(alias = "<=")]
#[doc(alias = ">=")]
#[rustc_on_unimplemented(
    message = "can't compare `{Self}` with `{Rhs}`",
    label = "no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`"
)]
pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> {
    /// This method returns an ordering between `self` and `other` values if one exists.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::cmp::Ordering;
    ///
    /// let result = 1.0.partial_cmp(&2.0);
    /// assert_eq!(result, Some(Ordering::Less));
    ///
    /// let result = 1.0.partial_cmp(&1.0);
    /// assert_eq!(result, Some(Ordering::Equal));
    ///
    /// let result = 2.0.partial_cmp(&1.0);
    /// assert_eq!(result, Some(Ordering::Greater));
    /// ```
    ///
    /// When comparison is impossible:
    ///
    /// ```
    /// let result = f64::NAN.partial_cmp(&1.0);
    /// assert_eq!(result, None);
    /// ```
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn partial_cmp(&self, other: &Rhs) -> Option<Ordering>;

    /// This method tests less than (for `self` and `other`) and is used by the `<` operator.
    ///
    /// # Examples
    ///
    /// ```
    /// let result = 1.0 < 2.0;
    /// assert_eq!(result, true);
    ///
    /// let result = 2.0 < 1.0;
    /// assert_eq!(result, false);
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn lt(&self, other: &Rhs) -> bool {
        // A `None` result (no ordering) makes all four operator methods
        // return `false` by default.
        matches!(self.partial_cmp(other), Some(Less))
    }

    /// This method tests less than or equal to (for `self` and `other`) and is used by the `<=`
    /// operator.
    ///
    /// # Examples
    ///
    /// ```
    /// let result = 1.0 <= 2.0;
    /// assert_eq!(result, true);
    ///
    /// let result = 2.0 <= 2.0;
    /// assert_eq!(result, true);
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn le(&self, other: &Rhs) -> bool {
        matches!(self.partial_cmp(other), Some(Less | Equal))
    }

    /// This method tests greater than (for `self` and `other`) and is used by the `>` operator.
    ///
    /// # Examples
    ///
    /// ```
    /// let result = 1.0 > 2.0;
    /// assert_eq!(result, false);
    ///
    /// let result = 2.0 > 2.0;
    /// assert_eq!(result, false);
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn gt(&self, other: &Rhs) -> bool {
        matches!(self.partial_cmp(other), Some(Greater))
    }

    /// This method tests greater than or equal to (for `self` and `other`) and is used by the `>=`
    /// operator.
    ///
    /// # Examples
    ///
    /// ```
    /// let result = 2.0 >= 1.0;
    /// assert_eq!(result, true);
    ///
    /// let result = 2.0 >= 2.0;
    /// assert_eq!(result, true);
    /// ```
    #[inline]
    #[must_use]
    #[stable(feature = "rust1", since = "1.0.0")]
    fn ge(&self, other: &Rhs) -> bool {
        matches!(self.partial_cmp(other), Some(Greater | Equal))
    }
}
+
/// Derive macro generating an impl of the trait `PartialOrd`.
///
/// The expansion is provided by the compiler itself (`rustc_builtin_macro`).
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics)]
pub macro PartialOrd($item:item) {
    /* compiler built-in */
}
+
+/// Compares and returns the minimum of two values.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// Internally uses an alias to [`Ord::min`].
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(1, cmp::min(1, 2));
+/// assert_eq!(2, cmp::min(2, 2));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn min<T: Ord>(v1: T, v2: T) -> T {
+ v1.min(v2)
+}
+
+/// Returns the minimum of two values with respect to the specified comparison function.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_min_max_by)]
+///
+/// use std::cmp;
+///
+/// assert_eq!(cmp::min_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 1);
+/// assert_eq!(cmp::min_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_min_max_by", issue = "64460")]
+pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+ match compare(&v1, &v2) {
+ Ordering::Less | Ordering::Equal => v1,
+ Ordering::Greater => v2,
+ }
+}
+
+/// Returns the element that gives the minimum value from the specified function.
+///
+/// Returns the first argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_min_max_by)]
+///
+/// use std::cmp;
+///
+/// assert_eq!(cmp::min_by_key(-2, 1, |x: &i32| x.abs()), 1);
+/// assert_eq!(cmp::min_by_key(-2, 2, |x: &i32| x.abs()), -2);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_min_max_by", issue = "64460")]
+pub fn min_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
+ min_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+}
+
+/// Compares and returns the maximum of two values.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// Internally uses an alias to [`Ord::max`].
+///
+/// # Examples
+///
+/// ```
+/// use std::cmp;
+///
+/// assert_eq!(2, cmp::max(1, 2));
+/// assert_eq!(2, cmp::max(2, 2));
+/// ```
+#[inline]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn max<T: Ord>(v1: T, v2: T) -> T {
+ v1.max(v2)
+}
+
+/// Returns the maximum of two values with respect to the specified comparison function.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_min_max_by)]
+///
+/// use std::cmp;
+///
+/// assert_eq!(cmp::max_by(-2, 1, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), -2);
+/// assert_eq!(cmp::max_by(-2, 2, |x: &i32, y: &i32| x.abs().cmp(&y.abs())), 2);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_min_max_by", issue = "64460")]
+pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
+ match compare(&v1, &v2) {
+ Ordering::Less | Ordering::Equal => v2,
+ Ordering::Greater => v1,
+ }
+}
+
+/// Returns the element that gives the maximum value from the specified function.
+///
+/// Returns the second argument if the comparison determines them to be equal.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(cmp_min_max_by)]
+///
+/// use std::cmp;
+///
+/// assert_eq!(cmp::max_by_key(-2, 1, |x: &i32| x.abs()), -2);
+/// assert_eq!(cmp::max_by_key(-2, 2, |x: &i32| x.abs()), 2);
+/// ```
+#[inline]
+#[must_use]
+#[unstable(feature = "cmp_min_max_by", issue = "64460")]
+pub fn max_by_key<T, F: FnMut(&T) -> K, K: Ord>(v1: T, v2: T, mut f: F) -> T {
+ max_by(v1, v2, |v1, v2| f(v1).cmp(&f(v2)))
+}
+
// Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types
mod impls {
    use crate::cmp::Ordering::{self, Equal, Greater, Less};
    use crate::hint::unreachable_unchecked;

    // Primitive `PartialEq`: a direct `==`/`!=` on the values.
    macro_rules! partial_eq_impl {
        ($($t:ty)*) => ($(
            #[stable(feature = "rust1", since = "1.0.0")]
            impl PartialEq for $t {
                #[inline]
                fn eq(&self, other: &$t) -> bool { (*self) == (*other) }
                #[inline]
                fn ne(&self, other: &$t) -> bool { (*self) != (*other) }
            }
        )*)
    }

    // The unit type has exactly one value, so any two `()` compare equal.
    #[stable(feature = "rust1", since = "1.0.0")]
    impl PartialEq for () {
        #[inline]
        fn eq(&self, _other: &()) -> bool {
            true
        }
        #[inline]
        fn ne(&self, _other: &()) -> bool {
            false
        }
    }

    partial_eq_impl! {
        bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64
    }

    // Note: the float types are deliberately absent from `eq_impl!` below —
    // NaN makes their equality non-reflexive, so they are only `PartialEq`.
    macro_rules! eq_impl {
        ($($t:ty)*) => ($(
            #[stable(feature = "rust1", since = "1.0.0")]
            impl Eq for $t {}
        )*)
    }

    eq_impl! { () bool char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }

    // `PartialOrd` derived from the primitive `<=`/`>=` operators. Used only
    // for `f32`/`f64` below: the `(false, false)` arm is the unordered case
    // (a NaN operand), which yields `None`.
    macro_rules! partial_ord_impl {
        ($($t:ty)*) => ($(
            #[stable(feature = "rust1", since = "1.0.0")]
            impl PartialOrd for $t {
                #[inline]
                fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
                    match (self <= other, self >= other) {
                        (false, false) => None,
                        (false, true) => Some(Greater),
                        (true, false) => Some(Less),
                        (true, true) => Some(Equal),
                    }
                }
                #[inline]
                fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
                #[inline]
                fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
                #[inline]
                fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
                #[inline]
                fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
            }
        )*)
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl PartialOrd for () {
        #[inline]
        fn partial_cmp(&self, _: &()) -> Option<Ordering> {
            Some(Equal)
        }
    }

    // `false < true`, via the 0/1 `u8` representation of `bool`.
    #[stable(feature = "rust1", since = "1.0.0")]
    impl PartialOrd for bool {
        #[inline]
        fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
            (*self as u8).partial_cmp(&(*other as u8))
        }
    }

    partial_ord_impl! { f32 f64 }

    // Totally ordered primitives: `partial_cmp` simply wraps `cmp`.
    macro_rules! ord_impl {
        ($($t:ty)*) => ($(
            #[stable(feature = "rust1", since = "1.0.0")]
            impl PartialOrd for $t {
                #[inline]
                fn partial_cmp(&self, other: &$t) -> Option<Ordering> {
                    Some(self.cmp(other))
                }
                #[inline]
                fn lt(&self, other: &$t) -> bool { (*self) < (*other) }
                #[inline]
                fn le(&self, other: &$t) -> bool { (*self) <= (*other) }
                #[inline]
                fn ge(&self, other: &$t) -> bool { (*self) >= (*other) }
                #[inline]
                fn gt(&self, other: &$t) -> bool { (*self) > (*other) }
            }

            #[stable(feature = "rust1", since = "1.0.0")]
            impl Ord for $t {
                #[inline]
                fn cmp(&self, other: &$t) -> Ordering {
                    // The order here is important to generate more optimal assembly.
                    // See <https://github.com/rust-lang/rust/issues/63758> for more info.
                    if *self < *other { Less }
                    else if *self == *other { Equal }
                    else { Greater }
                }
            }
        )*)
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl Ord for () {
        #[inline]
        fn cmp(&self, _other: &()) -> Ordering {
            Equal
        }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl Ord for bool {
        #[inline]
        fn cmp(&self, other: &bool) -> Ordering {
            // Casting to i8's and converting the difference to an Ordering generates
            // more optimal assembly.
            // See <https://github.com/rust-lang/rust/issues/66780> for more info.
            match (*self as i8) - (*other as i8) {
                -1 => Less,
                0 => Equal,
                1 => Greater,
                // SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
                _ => unsafe { unreachable_unchecked() },
            }
        }
    }

    ord_impl! { char usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }

    // The never type has no values, so these bodies can never actually run;
    // dereferencing `self` produces `!`, which coerces to any return type.
    #[unstable(feature = "never_type", issue = "35121")]
    impl PartialEq for ! {
        fn eq(&self, _: &!) -> bool {
            *self
        }
    }

    #[unstable(feature = "never_type", issue = "35121")]
    impl Eq for ! {}

    #[unstable(feature = "never_type", issue = "35121")]
    impl PartialOrd for ! {
        fn partial_cmp(&self, _: &!) -> Option<Ordering> {
            *self
        }
    }

    #[unstable(feature = "never_type", issue = "35121")]
    impl Ord for ! {
        fn cmp(&self, _: &!) -> Ordering {
            *self
        }
    }

    // & pointers
    //
    // References compare like the values they point to: every method forwards
    // to the pointee's implementation.

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialEq<&B> for &A
    where
        A: PartialEq<B>,
    {
        #[inline]
        fn eq(&self, other: &&B) -> bool {
            PartialEq::eq(*self, *other)
        }
        #[inline]
        fn ne(&self, other: &&B) -> bool {
            PartialEq::ne(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialOrd<&B> for &A
    where
        A: PartialOrd<B>,
    {
        #[inline]
        fn partial_cmp(&self, other: &&B) -> Option<Ordering> {
            PartialOrd::partial_cmp(*self, *other)
        }
        #[inline]
        fn lt(&self, other: &&B) -> bool {
            PartialOrd::lt(*self, *other)
        }
        #[inline]
        fn le(&self, other: &&B) -> bool {
            PartialOrd::le(*self, *other)
        }
        #[inline]
        fn gt(&self, other: &&B) -> bool {
            PartialOrd::gt(*self, *other)
        }
        #[inline]
        fn ge(&self, other: &&B) -> bool {
            PartialOrd::ge(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized> Ord for &A
    where
        A: Ord,
    {
        #[inline]
        fn cmp(&self, other: &Self) -> Ordering {
            Ord::cmp(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized> Eq for &A where A: Eq {}

    // &mut pointers

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &mut A
    where
        A: PartialEq<B>,
    {
        #[inline]
        fn eq(&self, other: &&mut B) -> bool {
            PartialEq::eq(*self, *other)
        }
        #[inline]
        fn ne(&self, other: &&mut B) -> bool {
            PartialEq::ne(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialOrd<&mut B> for &mut A
    where
        A: PartialOrd<B>,
    {
        #[inline]
        fn partial_cmp(&self, other: &&mut B) -> Option<Ordering> {
            PartialOrd::partial_cmp(*self, *other)
        }
        #[inline]
        fn lt(&self, other: &&mut B) -> bool {
            PartialOrd::lt(*self, *other)
        }
        #[inline]
        fn le(&self, other: &&mut B) -> bool {
            PartialOrd::le(*self, *other)
        }
        #[inline]
        fn gt(&self, other: &&mut B) -> bool {
            PartialOrd::gt(*self, *other)
        }
        #[inline]
        fn ge(&self, other: &&mut B) -> bool {
            PartialOrd::ge(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized> Ord for &mut A
    where
        A: Ord,
    {
        #[inline]
        fn cmp(&self, other: &Self) -> Ordering {
            Ord::cmp(*self, *other)
        }
    }
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized> Eq for &mut A where A: Eq {}

    // Mixed `&`/`&mut` equality, so e.g. `&T == &mut T` comparisons work.
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialEq<&mut B> for &A
    where
        A: PartialEq<B>,
    {
        #[inline]
        fn eq(&self, other: &&mut B) -> bool {
            PartialEq::eq(*self, *other)
        }
        #[inline]
        fn ne(&self, other: &&mut B) -> bool {
            PartialEq::ne(*self, *other)
        }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A: ?Sized, B: ?Sized> PartialEq<&B> for &mut A
    where
        A: PartialEq<B>,
    {
        #[inline]
        fn eq(&self, other: &&B) -> bool {
            PartialEq::eq(*self, *other)
        }
        #[inline]
        fn ne(&self, other: &&B) -> bool {
            PartialEq::ne(*self, *other)
        }
    }
}
--- /dev/null
+//! Traits for conversions between types.
+//!
+//! The traits in this module provide a way to convert from one type to another type.
+//! Each trait serves a different purpose:
+//!
+//! - Implement the [`AsRef`] trait for cheap reference-to-reference conversions
+//! - Implement the [`AsMut`] trait for cheap mutable-to-mutable conversions
+//! - Implement the [`From`] trait for consuming value-to-value conversions
+//! - Implement the [`Into`] trait for consuming value-to-value conversions to types
+//! outside the current crate
+//! - The [`TryFrom`] and [`TryInto`] traits behave like [`From`] and [`Into`],
+//! but should be implemented when the conversion can fail.
+//!
//! The traits in this module are often used as trait bounds for generic functions so that
//! arguments of multiple types are supported. See the documentation of each trait for examples.
+//!
+//! As a library author, you should always prefer implementing [`From<T>`][`From`] or
+//! [`TryFrom<T>`][`TryFrom`] rather than [`Into<U>`][`Into`] or [`TryInto<U>`][`TryInto`],
+//! as [`From`] and [`TryFrom`] provide greater flexibility and offer
+//! equivalent [`Into`] or [`TryInto`] implementations for free, thanks to a
+//! blanket implementation in the standard library. When targeting a version prior to Rust 1.41, it
+//! may be necessary to implement [`Into`] or [`TryInto`] directly when converting to a type
+//! outside the current crate.
+//!
+//! # Generic Implementations
+//!
+//! - [`AsRef`] and [`AsMut`] auto-dereference if the inner type is a reference
+//! - [`From`]`<U> for T` implies [`Into`]`<T> for U`
+//! - [`TryFrom`]`<U> for T` implies [`TryInto`]`<T> for U`
+//! - [`From`] and [`Into`] are reflexive, which means that all types can
+//! `into` themselves and `from` themselves
+//!
+//! See each trait for usage examples.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+
+mod num;
+
+#[unstable(feature = "convert_float_to_int", issue = "67057")]
+pub use num::FloatToInt;
+
+/// The identity function.
+///
+/// Two things are important to note about this function:
+///
+/// - It is not always equivalent to a closure like `|x| x`, since the
+/// closure may coerce `x` into a different type.
+///
+/// - It moves the input `x` passed to the function.
+///
+/// - It is a `const fn`, so it can also be used in constant contexts.
+///
+/// While it might seem strange to have a function that just returns back the
+/// input, there are some interesting uses.
+///
+/// # Examples
+///
+/// Using `identity` to do nothing in a sequence of other, interesting,
+/// functions:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// fn manipulation(x: u32) -> u32 {
+/// // Let's pretend that adding one is an interesting function.
+/// x + 1
+/// }
+///
+/// let _arr = &[identity, manipulation];
+/// ```
+///
+/// Using `identity` as a "do nothing" base case in a conditional:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// # let condition = true;
+/// #
+/// # fn manipulation(x: u32) -> u32 { x + 1 }
+/// #
+/// let do_stuff = if condition { manipulation } else { identity };
+///
+/// // Do more interesting stuff...
+///
+/// let _results = do_stuff(42);
+/// ```
+///
+/// Using `identity` to keep the `Some` variants of an iterator of `Option<T>`:
+///
+/// ```rust
+/// use std::convert::identity;
+///
+/// let iter = vec![Some(1), None, Some(3)].into_iter();
+/// let filtered = iter.filter_map(identity).collect::<Vec<_>>();
+/// assert_eq!(vec![1, 3], filtered);
+/// ```
+#[stable(feature = "convert_id", since = "1.33.0")]
+#[rustc_const_stable(feature = "const_identity", since = "1.33.0")]
+#[inline]
+pub const fn identity<T>(x: T) -> T {
+ x
+}
+
+/// Used to do a cheap reference-to-reference conversion.
+///
+/// This trait is similar to [`AsMut`] which is used for converting between mutable references.
+/// If you need to do a costly conversion it is better to implement [`From`] with type
+/// `&T` or write a custom function.
+///
+/// `AsRef` has the same signature as [`Borrow`], but [`Borrow`] is different in a few aspects:
+///
+/// - Unlike `AsRef`, [`Borrow`] has a blanket impl for any `T`, and can be used to accept either
+/// a reference or a value.
+/// - [`Borrow`] also requires that [`Hash`], [`Eq`] and [`Ord`] for borrowed value are
+/// equivalent to those of the owned value. For this reason, if you want to
+/// borrow only a single field of a struct you can implement `AsRef`, but not [`Borrow`].
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use a
+/// dedicated method which returns an [`Option<T>`] or a [`Result<T, E>`].
+///
+/// # Generic Implementations
+///
+/// - `AsRef` auto-dereferences if the inner type is a reference or a mutable
+/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type
+/// `&mut Foo` or `&&mut Foo`)
+///
+/// # Examples
+///
+/// By using trait bounds we can accept arguments of different types as long as they can be
+/// converted to the specified type `T`.
+///
+/// For example: By creating a generic function that takes an `AsRef<str>` we express that we
+/// want to accept all references that can be converted to [`&str`] as an argument.
+/// Since both [`String`] and [`&str`] implement `AsRef<str>` we can accept both as input argument.
+///
+/// [`&str`]: primitive@str
+/// [`Option<T>`]: Option
+/// [`Result<T, E>`]: Result
+/// [`Borrow`]: crate::borrow::Borrow
+/// [`Eq`]: crate::cmp::Eq
+/// [`Ord`]: crate::cmp::Ord
+/// [`String`]: ../../std/string/struct.String.html
+///
+/// ```
+/// fn is_hello<T: AsRef<str>>(s: T) {
+/// assert_eq!("hello", s.as_ref());
+/// }
+///
+/// let s = "hello";
+/// is_hello(s);
+///
+/// let s = "hello".to_string();
+/// is_hello(s);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait AsRef<T: ?Sized> {
+ /// Performs the conversion.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_ref(&self) -> &T;
+}
+
+/// Used to do a cheap mutable-to-mutable reference conversion.
+///
+/// This trait is similar to [`AsRef`] but used for converting between mutable
+/// references. If you need to do a costly conversion it is better to
+/// implement [`From`] with type `&mut T` or write a custom function.
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use a
+/// dedicated method which returns an [`Option<T>`] or a [`Result<T, E>`].
+///
+/// [`Option<T>`]: Option
+/// [`Result<T, E>`]: Result
+///
+/// # Generic Implementations
+///
+/// - `AsMut` auto-dereferences if the inner type is a mutable reference
+/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo`
+/// or `&mut &mut Foo`)
+///
+/// # Examples
+///
+/// Using `AsMut` as trait bound for a generic function we can accept all mutable references
+/// that can be converted to type `&mut T`. In the example below, `add_one` takes all arguments
+/// that can be converted to `&mut u64`. Because [`Box<T>`] implements `AsMut<T>`, `add_one`
+/// accepts arguments of type `&mut Box<u64>` as well:
+///
+/// ```
+/// fn add_one<T: AsMut<u64>>(num: &mut T) {
+/// *num.as_mut() += 1;
+/// }
+///
+/// let mut boxed_num = Box::new(0);
+/// add_one(&mut boxed_num);
+/// assert_eq!(*boxed_num, 1);
+/// ```
+///
+/// [`Box<T>`]: ../../std/boxed/struct.Box.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait AsMut<T: ?Sized> {
+ /// Performs the conversion.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn as_mut(&mut self) -> &mut T;
+}
+
+/// A value-to-value conversion that consumes the input value. The
+/// opposite of [`From`].
+///
+/// One should avoid implementing [`Into`] and implement [`From`] instead.
+/// Implementing [`From`] automatically provides one with an implementation of [`Into`]
+/// thanks to the blanket implementation in the standard library.
+///
+/// Prefer using [`Into`] over [`From`] when specifying trait bounds on a generic function
+/// to ensure that types that only implement [`Into`] can be used as well.
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use [`TryInto`].
+///
+/// # Generic Implementations
+///
+/// - [`From`]`<T> for U` implies `Into<U> for T`
+/// - [`Into`] is reflexive, which means that `Into<T> for T` is implemented
+///
+/// # Implementing [`Into`] for conversions to external types in old versions of Rust
+///
+/// Prior to Rust 1.41, if the destination type was not part of the current crate
+/// then you couldn't implement [`From`] directly.
+/// For example, take this code:
+///
+/// ```
+/// struct Wrapper<T>(Vec<T>);
+/// impl<T> From<Wrapper<T>> for Vec<T> {
+/// fn from(w: Wrapper<T>) -> Vec<T> {
+/// w.0
+/// }
+/// }
+/// ```
+/// This will fail to compile in older versions of the language because Rust's orphaning rules
+/// used to be a little bit more strict. To bypass this, you could implement [`Into`] directly:
+///
+/// ```
+/// struct Wrapper<T>(Vec<T>);
+/// impl<T> Into<Vec<T>> for Wrapper<T> {
+/// fn into(self) -> Vec<T> {
+/// self.0
+/// }
+/// }
+/// ```
+///
+/// It is important to understand that [`Into`] does not provide a [`From`] implementation
+/// (as [`From`] does with [`Into`]). Therefore, you should always try to implement [`From`]
+/// and then fall back to [`Into`] if [`From`] can't be implemented.
+///
+/// # Examples
+///
+/// In order to express that we want a generic function to take all arguments that can be
+/// converted to a specified type `T`, we can use a trait bound of [`Into`]`<T>`.
+/// For example: The function `is_hello` takes all arguments that can be converted into a
+/// [`Vec`]`<`[`u8`]`>`, and [`String`] implements [`Into`]`<`[`Vec`]`<`[`u8`]`>>`:
+///
+/// ```
+/// fn is_hello<T: Into<Vec<u8>>>(s: T) {
+/// let bytes = b"hello".to_vec();
+/// assert_eq!(bytes, s.into());
+/// }
+///
+/// let s = "hello".to_string();
+/// is_hello(s);
+/// ```
+///
+/// [`Option<T>`]: Option
+/// [`Result<T, E>`]: Result
+/// [`String`]: ../../std/string/struct.String.html
+/// [`Vec`]: ../../std/vec/struct.Vec.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Into<T>: Sized {
+ /// Performs the conversion.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn into(self) -> T;
+}
+
+/// Used to do value-to-value conversions while consuming the input value. It is the reciprocal of
+/// [`Into`].
+///
+/// One should always prefer implementing `From` over [`Into`]
+/// because implementing `From` automatically provides one with an implementation of [`Into`]
+/// thanks to the blanket implementation in the standard library.
+///
+/// Only implement [`Into`] when targeting a version prior to Rust 1.41 and converting to a type
+/// outside the current crate.
+/// `From` was not able to do these types of conversions in earlier versions because of Rust's
+/// orphaning rules.
+/// See [`Into`] for more details.
+///
+/// Prefer using [`Into`] over using `From` when specifying trait bounds on a generic function.
+/// This way, types that directly implement [`Into`] can be used as arguments as well.
+///
+/// The `From` trait is also very useful when performing error handling. When constructing a function
+/// that is capable of failing, the return type will generally be of the form `Result<T, E>`.
+/// The `From` trait simplifies error handling by allowing a function to return a single error type
+/// that encapsulates multiple error types. See the "Examples" section and [the book][book] for more
+/// details.
+///
+/// **Note: This trait must not fail**. If the conversion can fail, use [`TryFrom`].
+///
+/// # Generic Implementations
+///
+/// - `From<T> for U` implies [`Into`]`<U> for T`
+/// - `From` is reflexive, which means that `From<T> for T` is implemented
+///
+/// # Examples
+///
+/// [`String`] implements `From<&str>`:
+///
+/// An explicit conversion from a `&str` to a String is done as follows:
+///
+/// ```
+/// let string = "hello".to_string();
+/// let other_string = String::from("hello");
+///
+/// assert_eq!(string, other_string);
+/// ```
+///
+/// While performing error handling it is often useful to implement `From` for your own error type.
+/// By converting underlying error types to our own custom error type that encapsulates the
+/// underlying error type, we can return a single error type without losing information on the
+/// underlying cause. The '?' operator automatically converts the underlying error type to our
+/// custom error type by calling `Into<CliError>::into` which is automatically provided when
+/// implementing `From`. The compiler then infers which implementation of `Into` should be used.
+///
+/// ```
+/// use std::fs;
+/// use std::io;
+/// use std::num;
+///
+/// enum CliError {
+/// IoError(io::Error),
+/// ParseError(num::ParseIntError),
+/// }
+///
+/// impl From<io::Error> for CliError {
+/// fn from(error: io::Error) -> Self {
+/// CliError::IoError(error)
+/// }
+/// }
+///
+/// impl From<num::ParseIntError> for CliError {
+/// fn from(error: num::ParseIntError) -> Self {
+/// CliError::ParseError(error)
+/// }
+/// }
+///
+/// fn open_and_parse_file(file_name: &str) -> Result<i32, CliError> {
+/// let contents = fs::read_to_string(&file_name)?;
+/// let num: i32 = contents.trim().parse()?;
+/// Ok(num)
+/// }
+/// ```
+///
+/// [`Option<T>`]: Option
+/// [`Result<T, E>`]: Result
+/// [`String`]: ../../std/string/struct.String.html
+/// [`from`]: From::from
+/// [book]: ../../book/ch09-00-error-handling.html
+#[rustc_diagnostic_item = "from_trait"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(on(
+ all(_Self = "&str", T = "std::string::String"),
+ note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
+))]
+pub trait From<T>: Sized {
+ /// Performs the conversion.
+ #[lang = "from"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn from(_: T) -> Self;
+}
+
+/// An attempted conversion that consumes `self`, which may or may not be
+/// expensive.
+///
+/// Library authors should usually not directly implement this trait,
+/// but should prefer implementing the [`TryFrom`] trait, which offers
+/// greater flexibility and provides an equivalent `TryInto`
+/// implementation for free, thanks to a blanket implementation in the
+/// standard library. For more information on this, see the
+/// documentation for [`Into`].
+///
+/// For usage examples, see the documentation of [`TryFrom`].
+///
+/// # Implementing `TryInto`
+///
+/// This suffers the same restrictions and reasoning as implementing
+/// [`Into`], see there for details.
+#[stable(feature = "try_from", since = "1.34.0")]
+pub trait TryInto<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ type Error;
+
+ /// Performs the conversion.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ fn try_into(self) -> Result<T, Self::Error>;
+}
+
+/// Simple and safe type conversions that may fail in a controlled
+/// way under some circumstances. It is the reciprocal of [`TryInto`].
+///
+/// This is useful when you are doing a type conversion that may
+/// trivially succeed but may also need special handling.
+/// For example, there is no way to convert an [`i64`] into an [`i32`]
+/// using the [`From`] trait, because an [`i64`] may contain a value
+/// that an [`i32`] cannot represent and so the conversion would lose data.
+/// This might be handled by truncating the [`i64`] to an [`i32`] (essentially
+/// keeping only the low 32 bits of the [`i64`]'s value) or by simply returning
+/// [`i32::MAX`], or by some other method. The [`From`] trait is intended
+/// for perfect conversions, so the `TryFrom` trait informs the
+/// programmer when a type conversion could go bad and lets them
+/// decide how to handle it.
+///
+/// # Generic Implementations
+///
+/// - `TryFrom<T> for U` implies [`TryInto`]`<U> for T`
+/// - [`try_from`] is reflexive, which means that `TryFrom<T> for T`
+/// is implemented and cannot fail -- the associated `Error` type for
+/// calling `T::try_from()` on a value of type `T` is [`Infallible`].
+/// When the [`!`] type is stabilized [`Infallible`] and [`!`] will be
+/// equivalent.
+///
+/// `TryFrom<T>` can be implemented as follows:
+///
+/// ```
+/// use std::convert::TryFrom;
+///
+/// struct GreaterThanZero(i32);
+///
+/// impl TryFrom<i32> for GreaterThanZero {
+/// type Error = &'static str;
+///
+/// fn try_from(value: i32) -> Result<Self, Self::Error> {
+/// if value <= 0 {
+/// Err("GreaterThanZero only accepts values greater than zero!")
+/// } else {
+/// Ok(GreaterThanZero(value))
+/// }
+/// }
+/// }
+/// ```
+///
+/// # Examples
+///
+/// As described, [`i32`] implements `TryFrom<`[`i64`]`>`:
+///
+/// ```
+/// use std::convert::TryFrom;
+///
+/// let big_number = 1_000_000_000_000i64;
+/// // Silently truncates `big_number`, requires detecting
+/// // and handling the truncation after the fact.
+/// let smaller_number = big_number as i32;
+/// assert_eq!(smaller_number, -727379968);
+///
+/// // Returns an error because `big_number` is too big to
+/// // fit in an `i32`.
+/// let try_smaller_number = i32::try_from(big_number);
+/// assert!(try_smaller_number.is_err());
+///
+/// // Returns `Ok(3)`.
+/// let try_successful_smaller_number = i32::try_from(3);
+/// assert!(try_successful_smaller_number.is_ok());
+/// ```
+///
+/// [`i32::MAX`]: crate::i32::MAX
+/// [`try_from`]: TryFrom::try_from
+/// [`!`]: ../../std/primitive.never.html
+#[stable(feature = "try_from", since = "1.34.0")]
+pub trait TryFrom<T>: Sized {
+ /// The type returned in the event of a conversion error.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ type Error;
+
+ /// Performs the conversion.
+ #[stable(feature = "try_from", since = "1.34.0")]
+ fn try_from(value: T) -> Result<Self, Self::Error>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// GENERIC IMPLS
+////////////////////////////////////////////////////////////////////////////////
+
+// As lifts over &
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, U: ?Sized> AsRef<U> for &T
+where
+ T: AsRef<U>,
+{
+ fn as_ref(&self) -> &U {
+ // Fully-qualified syntax dispatches explicitly to `T`'s impl.
+ <T as AsRef<U>>::as_ref(*self)
+ }
+}
+
+// As lifts over &mut
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, U: ?Sized> AsRef<U> for &mut T
+where
+ T: AsRef<U>,
+{
+ fn as_ref(&self) -> &U {
+ // Fully-qualified syntax dispatches explicitly to `T`'s impl.
+ <T as AsRef<U>>::as_ref(*self)
+ }
+}
+
+// FIXME (#45742): replace the above impls for &/&mut with the following more general one:
+// // As lifts over Deref
+// impl<D: ?Sized + Deref<Target: AsRef<U>>, U: ?Sized> AsRef<U> for D {
+// fn as_ref(&self) -> &U {
+// self.deref().as_ref()
+// }
+// }
+
+// AsMut lifts over &mut
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, U: ?Sized> AsMut<U> for &mut T
+where
+ T: AsMut<U>,
+{
+ fn as_mut(&mut self) -> &mut U {
+ // Use fully-qualified syntax for consistency with the `AsRef` impls
+ // above; `*self` is implicitly reborrowed as `&mut T` and dispatched
+ // explicitly to `T`'s impl.
+ <T as AsMut<U>>::as_mut(*self)
+ }
+}
+
+// FIXME (#45742): replace the above impl for &mut with the following more general one:
+// // AsMut lifts over DerefMut
+// impl<D: ?Sized + Deref<Target: AsMut<U>>, U: ?Sized> AsMut<U> for D {
+// fn as_mut(&mut self) -> &mut U {
+// self.deref_mut().as_mut()
+// }
+// }
+
+// From implies Into: this blanket impl is why implementing `From` provides an
+// `Into` implementation "for free", and why one should not implement `Into`
+// directly when `From` is possible.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, U> Into<U> for T
+where
+ U: From<T>,
+{
+ fn into(self) -> U {
+ U::from(self)
+ }
+}
+
+// From (and thus Into) is reflexive: every type converts to itself.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> From<T> for T {
+ fn from(t: T) -> T {
+ t
+ }
+}
+
+/// **Stability note:** This impl does not yet exist, but we are
+/// "reserving space" to add it in the future. See
+/// [rust-lang/rust#64715][#64715] for details.
+///
+/// [#64715]: https://github.com/rust-lang/rust/issues/64715
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[allow(unused_attributes)] // FIXME(#58633): do a principled fix instead.
+#[rustc_reservation_impl = "permitting this impl would forbid us from adding \
+ `impl<T> From<!> for T` later; see rust-lang/rust#64715 for details"]
+impl<T> From<!> for T {
+ fn from(t: T) -> T {
+ // `t` has type `!`, which coerces to any type, so this body type-checks
+ // for every `T`.
+ t
+ }
+}
+
+// TryFrom implies TryInto: the blanket impl forwards both the conversion and
+// the associated `Error` type to the `TryFrom` implementation.
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<T, U> TryInto<U> for T
+where
+ U: TryFrom<T>,
+{
+ type Error = U::Error;
+
+ fn try_into(self) -> Result<U, U::Error> {
+ U::try_from(self)
+ }
+}
+
+// Infallible conversions are semantically equivalent to fallible conversions
+// with an uninhabited error type. Since `Into` must not fail, the error type
+// here is `Infallible`, which can never be constructed.
+#[stable(feature = "try_from", since = "1.34.0")]
+impl<T, U> TryFrom<U> for T
+where
+ U: Into<T>,
+{
+ type Error = Infallible;
+
+ fn try_from(value: U) -> Result<Self, Self::Error> {
+ Ok(U::into(value))
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CONCRETE IMPLS
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> AsRef<[T]> for [T] {
+ // Identity conversion; `#[inline]` for consistency with the
+ // `AsRef<str> for str` impl, which already carries it.
+ #[inline]
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> AsMut<[T]> for [T] {
+ // Identity conversion; `#[inline]` for consistency with the
+ // `AsRef<str> for str` impl, which already carries it.
+ #[inline]
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<str> for str {
+ // Identity conversion: a `str` trivially is its own `str` view.
+ #[inline]
+ fn as_ref(&self) -> &str {
+ self
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// THE NO-ERROR ERROR TYPE
+////////////////////////////////////////////////////////////////////////////////
+
+/// The error type for errors that can never happen.
+///
+/// Since this enum has no variant, a value of this type can never actually exist.
+/// This can be useful for generic APIs that use [`Result`] and parameterize the error type,
+/// to indicate that the result is always [`Ok`].
+///
+/// For example, the [`TryFrom`] trait (conversion that returns a [`Result`])
+/// has a blanket implementation for all types where a reverse [`Into`] implementation exists.
+///
+/// ```ignore (illustrates std code, duplicating the impl in a doctest would be an error)
+/// impl<T, U> TryFrom<U> for T where U: Into<T> {
+/// type Error = Infallible;
+///
+/// fn try_from(value: U) -> Result<Self, Infallible> {
+/// Ok(U::into(value)) // Never returns `Err`
+/// }
+/// }
+/// ```
+///
+/// # Future compatibility
+///
+/// This enum has the same role as [the `!` “never” type][never],
+/// which is unstable in this version of Rust.
+/// When `!` is stabilized, we plan to make `Infallible` a type alias to it:
+///
+/// ```ignore (illustrates future std change)
+/// pub type Infallible = !;
+/// ```
+///
+/// … and eventually deprecate `Infallible`.
+///
+/// However there is one case where `!` syntax can be used
+/// before `!` is stabilized as a full-fledged type: in the position of a function’s return type.
+/// Specifically, it is possible to write implementations for two different function pointer types:
+///
+/// ```
+/// trait MyTrait {}
+/// impl MyTrait for fn() -> ! {}
+/// impl MyTrait for fn() -> std::convert::Infallible {}
+/// ```
+///
+/// With `Infallible` being an enum, this code is valid.
+/// However when `Infallible` becomes an alias for the never type,
+/// the two `impl`s will start to overlap
+/// and therefore will be disallowed by the language’s trait coherence rules.
+///
+/// [never]: ../../std/primitive.never.html
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+#[derive(Copy)]
+pub enum Infallible {}
+
+// `Infallible` has no variants, so a value of it can never exist. Each impl
+// below therefore uses an empty `match *self {}`, which the compiler accepts
+// as a diverging (unreachable) body of any type.
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl Clone for Infallible {
+ fn clone(&self) -> Infallible {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl fmt::Debug for Infallible {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl fmt::Display for Infallible {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl PartialEq for Infallible {
+ fn eq(&self, _: &Infallible) -> bool {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl Eq for Infallible {}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl PartialOrd for Infallible {
+ fn partial_cmp(&self, _other: &Self) -> Option<crate::cmp::Ordering> {
+ match *self {}
+ }
+}
+
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl Ord for Infallible {
+ fn cmp(&self, _other: &Self) -> crate::cmp::Ordering {
+ match *self {}
+ }
+}
+
+// `!` coerces to any type, including `Infallible` (both are uninhabited).
+#[stable(feature = "convert_infallible", since = "1.34.0")]
+impl From<!> for Infallible {
+ fn from(x: !) -> Self {
+ x
+ }
+}
+
+#[stable(feature = "convert_infallible_hash", since = "1.44.0")]
+impl Hash for Infallible {
+ fn hash<H: Hasher>(&self, _: &mut H) {
+ match *self {}
+ }
+}
--- /dev/null
+use super::{From, TryFrom};
+use crate::num::TryFromIntError;
+
+// "Sealed trait" pattern: `FloatToInt` requires `private::Sealed` as a
+// supertrait, and only this crate can name (and thus implement) `Sealed`.
+mod private {
+ /// This trait being unreachable from outside the crate
+ /// prevents other implementations of the `FloatToInt` trait,
+ /// which allows potentially adding more trait methods after the trait is `#[stable]`.
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ pub trait Sealed {}
+}
+
+/// Supporting trait for inherent methods of `f32` and `f64` such as `round_unchecked_to`.
+/// Typically doesn’t need to be used directly.
+#[unstable(feature = "convert_float_to_int", issue = "67057")]
+pub trait FloatToInt<Int>: private::Sealed + Sized {
+ // The method is `#[doc(hidden)]`: it is an implementation detail backing
+ // the inherent float methods rather than a public entry point.
+ // NOTE(review): callers must uphold the contract of the
+ // `float_to_int_unchecked` intrinsic (value in range of `Int`) — confirm
+ // against the intrinsic's documentation.
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ #[doc(hidden)]
+ unsafe fn to_int_unchecked(self) -> Int;
+}
+
+// Implements `private::Sealed` for `$Float` and `FloatToInt<$Int>` for
+// `$Float` for each listed integer type, delegating to the compiler
+// intrinsic.
+macro_rules! impl_float_to_int {
+ ( $Float: ident => $( $Int: ident )+ ) => {
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ impl private::Sealed for $Float {}
+ $(
+ #[unstable(feature = "convert_float_to_int", issue = "67057")]
+ impl FloatToInt<$Int> for $Float {
+ #[doc(hidden)]
+ #[inline]
+ unsafe fn to_int_unchecked(self) -> $Int {
+ // SAFETY: the safety contract must be upheld by the caller.
+ unsafe { crate::intrinsics::float_to_int_unchecked(self) }
+ }
+ }
+ )+
+ }
+}
+
+impl_float_to_int!(f32 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+impl_float_to_int!(f64 => u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize);
+
+// Conversion traits for primitive integer and float types
+// Conversions T -> T are covered by a blanket impl and therefore excluded
+// Some conversions from and to usize/isize are not implemented due to portability concerns
+//
+// First arm takes an explicit `$doc` string for the impl; second arm
+// generates the standard "Converts X to Y losslessly." doc text. Callers must
+// only invoke this for lossless (widening) conversions, so the `as Self` cast
+// cannot truncate.
+macro_rules! impl_from {
+ ($Small: ty, $Large: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ #[doc = $doc]
+ impl From<$Small> for $Large {
+ #[inline]
+ fn from(small: $Small) -> Self {
+ small as Self
+ }
+ }
+ };
+ ($Small: ty, $Large: ty, #[$attr:meta]) => {
+ impl_from!($Small,
+ $Large,
+ #[$attr],
+ concat!("Converts `",
+ stringify!($Small),
+ "` to `",
+ stringify!($Large),
+ "` losslessly."));
+ }
+}
+
+// Generates `From<bool>` for an integer type, with doc text and a doctest
+// pinning `false => 0` and `true => 1`.
+macro_rules! impl_from_bool {
+ ($target: ty, #[$attr:meta]) => {
+ impl_from!(bool, $target, #[$attr], concat!("Converts a `bool` to a `",
+ stringify!($target), "`. The resulting value is `0` for `false` and `1` for `true`
+values.
+
+# Examples
+
+```
+assert_eq!(", stringify!($target), "::from(true), 1);
+assert_eq!(", stringify!($target), "::from(false), 0);
+```"));
+ };
+}
+
+// bool -> any integer type (`false` => 0, `true` => 1)
+impl_from_bool! { u8, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u16, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u32, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u64, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { u128, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { usize, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i8, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i16, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i32, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i64, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { i128, #[stable(feature = "from_bool", since = "1.28.0")] }
+impl_from_bool! { isize, #[stable(feature = "from_bool", since = "1.28.0")] }
+
+// Unsigned -> Unsigned (widening, always lossless)
+impl_from! { u8, u16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u8, usize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u32, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u32, u128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u64, u128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// Signed -> Signed (widening, always lossless)
+impl_from! { i8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i8, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i8, isize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i16, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { i32, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { i64, i128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// Unsigned -> Signed (strictly widening, so the sign bit is never set)
+impl_from! { u8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u8, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u16, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] }
+impl_from! { u32, i128, #[stable(feature = "i128", since = "1.26.0")] }
+impl_from! { u64, i128, #[stable(feature = "i128", since = "1.26.0")] }
+
+// The C99 standard defines bounds on INTPTR_MIN, INTPTR_MAX, and UINTPTR_MAX
+// which imply that pointer-sized integers must be at least 16 bits:
+// https://port70.net/~nsz/c/c99/n1256.html#7.18.2.4
+impl_from! { u16, usize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+impl_from! { u8, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+impl_from! { i16, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] }
+
+// RISC-V defines the possibility of a 128-bit address space (RV128).
+
+// CHERI proposes 256-bit “capabilities”. Unclear if this would be relevant to usize/isize.
+// https://www.cl.cam.ac.uk/research/security/ctsrd/pdfs/20171017a-cheri-poster.pdf
+// http://www.csl.sri.com/users/neumann/2012resolve-cheri.pdf
+
+// Note: integers can only be represented with full precision in a float if
+// they fit in the significand, which is 24 bits in f32 and 53 bits in f64.
+// Lossy float conversions are not implemented at this time.
+
+// Signed -> Float
+impl_from! { i8, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i8, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i16, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i16, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { i32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// Unsigned -> Float
+impl_from! { u8, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u8, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u16, f32, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u16, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// Float -> Float (every f32 is exactly representable as an f64)
+impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] }
+
+// no possible bounds violation: every `$source` value fits in `$target`
+macro_rules! try_from_unbounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ impl TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. For these source/target pairs every source
+ /// value is representable in the target type, so the
+ /// conversion never actually fails and always returns `Ok`.
+ #[inline]
+ fn try_from(value: $source) -> Result<Self, Self::Error> {
+ Ok(value as Self)
+ }
+ }
+ )*}
+}
+
+// only the lower bound (zero) can be violated: used for signed -> unsigned of
+// equal or greater width, where any non-negative value fits
+macro_rules! try_from_lower_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ impl TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ if u >= 0 {
+ Ok(u as Self)
+ } else {
+ Err(TryFromIntError(()))
+ }
+ }
+ }
+ )*}
+}
+
+// only the upper bound can be violated: used for unsigned -> smaller unsigned
+// and unsigned -> signed of equal or smaller width (source is never negative)
+macro_rules! try_from_upper_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ impl TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ // `Self::MAX as $source` is lossless: the target's maximum
+ // always fits in the wider/equal-width source type here.
+ if u > (Self::MAX as $source) {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as Self)
+ }
+ }
+ }
+ )*}
+}
+
+// all other cases
+// Used when the target's range is a strict interior subset of the source's
+// (e.g. signed -> narrower signed, or signed -> narrower unsigned), so both
+// the minimum and the maximum must be checked.
+macro_rules! try_from_both_bounded {
+ ($source:ty, $($target:ty),*) => {$(
+ #[stable(feature = "try_from", since = "1.34.0")]
+ impl TryFrom<$source> for $target {
+ type Error = TryFromIntError;
+
+ /// Try to create the target number type from a source
+ /// number type. This returns an error if the source value
+ /// is outside of the range of the target type.
+ #[inline]
+ fn try_from(u: $source) -> Result<Self, Self::Error> {
+ // Widening the target's bounds into $source is lossless here.
+ let min = Self::MIN as $source;
+ let max = Self::MAX as $source;
+ if u < min || u > max {
+ Err(TryFromIntError(()))
+ } else {
+ Ok(u as Self)
+ }
+ }
+ }
+ )*}
+}
+
+// Invokes `$mac` once per `$target` with the two type arguments swapped,
+// generating impls in the reverse direction (e.g. fixed-width -> usize/isize).
+macro_rules! rev {
+ ($mac:ident, $source:ty, $($target:ty),*) => {$(
+ $mac!($target, $source);
+ )*}
+}
+
+// intra-sign conversions
+// unsigned -> narrower unsigned: only the upper bound can be exceeded.
+try_from_upper_bounded!(u16, u8);
+try_from_upper_bounded!(u32, u16, u8);
+try_from_upper_bounded!(u64, u32, u16, u8);
+try_from_upper_bounded!(u128, u64, u32, u16, u8);
+
+// signed -> narrower signed: both bounds can be exceeded.
+try_from_both_bounded!(i16, i8);
+try_from_both_bounded!(i32, i16, i8);
+try_from_both_bounded!(i64, i32, i16, i8);
+try_from_both_bounded!(i128, i64, i32, i16, i8);
+
+// unsigned-to-signed
+try_from_upper_bounded!(u8, i8);
+try_from_upper_bounded!(u16, i8, i16);
+try_from_upper_bounded!(u32, i8, i16, i32);
+try_from_upper_bounded!(u64, i8, i16, i32, i64);
+try_from_upper_bounded!(u128, i8, i16, i32, i64, i128);
+
+// signed-to-unsigned
+// Same-or-wider unsigned target: only negative inputs fail.
+try_from_lower_bounded!(i8, u8, u16, u32, u64, u128);
+try_from_lower_bounded!(i16, u16, u32, u64, u128);
+try_from_lower_bounded!(i32, u32, u64, u128);
+try_from_lower_bounded!(i64, u64, u128);
+try_from_lower_bounded!(i128, u128);
+// Narrower unsigned target: both negative and too-large inputs fail.
+try_from_both_bounded!(i16, u8);
+try_from_both_bounded!(i32, u16, u8);
+try_from_both_bounded!(i64, u32, u16, u8);
+try_from_both_bounded!(i128, u64, u32, u16, u8);
+
+// usize/isize
+// These two hold on every pointer width; the rest live in ptr_try_from_impls.
+try_from_upper_bounded!(usize, isize);
+try_from_lower_bounded!(isize, usize);
+
+#[cfg(target_pointer_width = "16")]
+// On 16-bit targets usize has u16's range and isize has i16's range, which
+// determines which of these conversions are infallible vs. bounded.
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8);
+ try_from_unbounded!(usize, u16, u32, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16);
+ try_from_unbounded!(usize, i32, i64, i128);
+
+ try_from_both_bounded!(isize, u8);
+ try_from_lower_bounded!(isize, u16, u32, u64, u128);
+ try_from_both_bounded!(isize, i8);
+ try_from_unbounded!(isize, i16, i32, i64, i128);
+
+ // `rev!` generates the fixed-width -> usize/isize directions.
+ rev!(try_from_upper_bounded, usize, u32, u64, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16);
+ rev!(try_from_both_bounded, usize, i32, i64, i128);
+
+ rev!(try_from_upper_bounded, isize, u16, u32, u64, u128);
+ rev!(try_from_both_bounded, isize, i32, i64, i128);
+}
+
+#[cfg(target_pointer_width = "32")]
+// On 32-bit targets usize has u32's range and isize has i32's range, which
+// determines which of these conversions are infallible vs. bounded.
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8, u16);
+ try_from_unbounded!(usize, u32, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16, i32);
+ try_from_unbounded!(usize, i64, i128);
+
+ try_from_both_bounded!(isize, u8, u16);
+ try_from_lower_bounded!(isize, u32, u64, u128);
+ try_from_both_bounded!(isize, i8, i16);
+ try_from_unbounded!(isize, i32, i64, i128);
+
+ // `rev!` generates the fixed-width -> usize/isize directions.
+ rev!(try_from_unbounded, usize, u32);
+ rev!(try_from_upper_bounded, usize, u64, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16, i32);
+ rev!(try_from_both_bounded, usize, i64, i128);
+
+ rev!(try_from_unbounded, isize, u16);
+ rev!(try_from_upper_bounded, isize, u32, u64, u128);
+ rev!(try_from_unbounded, isize, i32);
+ rev!(try_from_both_bounded, isize, i64, i128);
+}
+
+#[cfg(target_pointer_width = "64")]
+// On 64-bit targets usize has u64's range and isize has i64's range, which
+// determines which of these conversions are infallible vs. bounded.
+mod ptr_try_from_impls {
+ use super::TryFromIntError;
+ use crate::convert::TryFrom;
+
+ try_from_upper_bounded!(usize, u8, u16, u32);
+ try_from_unbounded!(usize, u64, u128);
+ try_from_upper_bounded!(usize, i8, i16, i32, i64);
+ try_from_unbounded!(usize, i128);
+
+ try_from_both_bounded!(isize, u8, u16, u32);
+ try_from_lower_bounded!(isize, u64, u128);
+ try_from_both_bounded!(isize, i8, i16, i32);
+ try_from_unbounded!(isize, i64, i128);
+
+ // `rev!` generates the fixed-width -> usize/isize directions.
+ rev!(try_from_unbounded, usize, u32, u64);
+ rev!(try_from_upper_bounded, usize, u128);
+ rev!(try_from_lower_bounded, usize, i8, i16, i32, i64);
+ rev!(try_from_both_bounded, usize, i128);
+
+ rev!(try_from_unbounded, isize, u16, u32);
+ rev!(try_from_upper_bounded, isize, u64, u128);
+ rev!(try_from_unbounded, isize, i32, i64);
+ rev!(try_from_both_bounded, isize, i128);
+}
+
+// Conversion traits for non-zero integer types
+use crate::num::NonZeroI128;
+use crate::num::NonZeroI16;
+use crate::num::NonZeroI32;
+use crate::num::NonZeroI64;
+use crate::num::NonZeroI8;
+use crate::num::NonZeroIsize;
+use crate::num::NonZeroU128;
+use crate::num::NonZeroU16;
+use crate::num::NonZeroU32;
+use crate::num::NonZeroU64;
+use crate::num::NonZeroU8;
+use crate::num::NonZeroUsize;
+
+// Generates a lossless `From<$Small> for $Large` between NonZero types.
+// Correct only when the underlying primitive of $Small widens into the
+// primitive of $Large via `From` (the `.into()` below enforces this at
+// compile time).
+macro_rules! nzint_impl_from {
+ ($Small: ty, $Large: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ #[doc = $doc]
+ impl From<$Small> for $Large {
+ #[inline]
+ fn from(small: $Small) -> Self {
+ // SAFETY: input type guarantees the value is non-zero
+ unsafe {
+ Self::new_unchecked(small.get().into())
+ }
+ }
+ }
+ };
+ // Arm without $doc: synthesizes the standard "Converts X to Y losslessly."
+ // doc string and recurses into the arm above.
+ ($Small: ty, $Large: ty, #[$attr:meta]) => {
+ nzint_impl_from!($Small,
+ $Large,
+ #[$attr],
+ concat!("Converts `",
+ stringify!($Small),
+ "` to `",
+ stringify!($Large),
+ "` losslessly."));
+ }
+}
+
+// Non-zero Unsigned -> Non-zero Unsigned
+nzint_impl_from! { NonZeroU8, NonZeroU16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroUsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroUsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroU64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU64, NonZeroU128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+// Non-zero Signed -> Non-zero Signed
+nzint_impl_from! { NonZeroI8, NonZeroI16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI8, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI16, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI32, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI32, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroI64, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+// Non-zero Unsigned -> Non-zero Signed
+// Only into strictly wider signed types, so the unsigned maximum still fits.
+nzint_impl_from! { NonZeroU8, NonZeroI16, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU8, NonZeroIsize, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI32, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU16, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroI64, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU32, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+nzint_impl_from! { NonZeroU64, NonZeroI128, #[stable(feature = "nz_int_conv", since = "1.41.0")] }
+
+// Generates `TryFrom<$Int> for $NonZeroInt` for a same-width pair; the only
+// failing input is zero.
+macro_rules! nzint_impl_try_from_int {
+ ($Int: ty, $NonZeroInt: ty, #[$attr:meta], $doc: expr) => {
+ #[$attr]
+ #[doc = $doc]
+ impl TryFrom<$Int> for $NonZeroInt {
+ type Error = TryFromIntError;
+
+ #[inline]
+ fn try_from(value: $Int) -> Result<Self, Self::Error> {
+ // `new` returns `None` exactly when `value == 0`.
+ Self::new(value).ok_or(TryFromIntError(()))
+ }
+ }
+ };
+ // Arm without $doc: synthesizes the standard doc string and recurses.
+ ($Int: ty, $NonZeroInt: ty, #[$attr:meta]) => {
+ nzint_impl_try_from_int!($Int,
+ $NonZeroInt,
+ #[$attr],
+ concat!("Attempts to convert `",
+ stringify!($Int),
+ "` to `",
+ stringify!($NonZeroInt),
+ "`."));
+ }
+}
+
+// Int -> Non-zero Int
+// One impl per primitive/NonZero pair of the same width; zero maps to Err.
+nzint_impl_try_from_int! { u8, NonZeroU8, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u16, NonZeroU16, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u32, NonZeroU32, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u64, NonZeroU64, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { u128, NonZeroU128, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { usize, NonZeroUsize, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i8, NonZeroI8, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i16, NonZeroI16, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i32, NonZeroI32, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i64, NonZeroI64, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { i128, NonZeroI128, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+nzint_impl_try_from_int! { isize, NonZeroIsize, #[stable(feature = "nzint_try_from_int_conv", since = "1.46.0")] }
+
+// Generates `TryFrom<$From> for $To` between NonZero types by delegating the
+// range check to the primitive `TryFrom` impls defined above.
+macro_rules! nzint_impl_try_from_nzint {
+ ($From:ty => $To:ty, $doc: expr) => {
+ #[stable(feature = "nzint_try_from_nzint_conv", since = "1.49.0")]
+ #[doc = $doc]
+ impl TryFrom<$From> for $To {
+ type Error = TryFromIntError;
+
+ #[inline]
+ fn try_from(value: $From) -> Result<Self, Self::Error> {
+ // The primitive conversion does the bounds check; on success
+ // non-zero-ness is preserved, so `new_unchecked` is sound.
+ TryFrom::try_from(value.get()).map(|v| {
+ // SAFETY: $From is a NonZero type, so v is not zero.
+ unsafe { Self::new_unchecked(v) }
+ })
+ }
+ }
+ };
+ // Convenience arm: one target, many sources, standard doc string each.
+ ($To:ty: $($From: ty),*) => {$(
+ nzint_impl_try_from_nzint!(
+ $From => $To,
+ concat!(
+ "Attempts to convert `",
+ stringify!($From),
+ "` to `",
+ stringify!($To),
+ "`.",
+ )
+ );
+ )*};
+}
+
+// Non-zero int -> non-zero unsigned int
+// Each target lists every source whose conversion can fail (sign change or
+// narrowing); infallible pairs are covered by the `From` impls above.
+nzint_impl_try_from_nzint! { NonZeroU8: NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU16: NonZeroI8, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU32: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU64: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroU128: NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroUsize: NonZeroI8, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroIsize }
+
+// Non-zero int -> non-zero signed int
+nzint_impl_try_from_nzint! { NonZeroI8: NonZeroU8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI16: NonZeroU16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI32: NonZeroU32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI64: NonZeroU64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroI128: NonZeroU128, NonZeroUsize, NonZeroIsize }
+nzint_impl_try_from_nzint! { NonZeroIsize: NonZeroU16, NonZeroU32, NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize }
--- /dev/null
+//! The `Default` trait for types which may have meaningful default values.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+/// A trait for giving a type a useful default value.
+///
+/// Sometimes, you want to fall back to some kind of default value, and
+/// don't particularly care what it is. This comes up often with `struct`s
+/// that define a set of options:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+/// ```
+///
+/// How can we define some default values? You can use `Default`:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Default)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+///
+/// fn main() {
+/// let options: SomeOptions = Default::default();
+/// }
+/// ```
+///
+/// Now, you get all of the default values. Rust implements `Default` for various primitive types.
+///
+/// If you want to override a particular option, but still retain the other defaults:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// # #[derive(Default)]
+/// # struct SomeOptions {
+/// # foo: i32,
+/// # bar: f32,
+/// # }
+/// fn main() {
+/// let options = SomeOptions { foo: 42, ..Default::default() };
+/// }
+/// ```
+///
+/// ## Derivable
+///
+/// This trait can be used with `#[derive]` if all of the type's fields implement
+/// `Default`. When `derive`d, it will use the default value for each field's type.
+///
+/// ## How can I implement `Default`?
+///
+/// Provide an implementation for the `default()` method that returns the value of
+/// your type that should be the default:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// enum Kind {
+/// A,
+/// B,
+/// C,
+/// }
+///
+/// impl Default for Kind {
+/// fn default() -> Self { Kind::A }
+/// }
+/// ```
+///
+/// # Examples
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Default)]
+/// struct SomeOptions {
+/// foo: i32,
+/// bar: f32,
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Default: Sized {
+ /// Returns the "default value" for a type.
+ ///
+ /// Default values are often some kind of initial value, identity value, or anything else that
+ /// may make sense as a default.
+ ///
+ /// # Examples
+ ///
+ /// Using built-in default values:
+ ///
+ /// ```
+ /// let i: i8 = Default::default();
+ /// let (x, y): (Option<String>, f64) = Default::default();
+ /// let (a, b, (c, d)): (i32, u32, (bool, bool)) = Default::default();
+ /// ```
+ ///
+ /// Making your own:
+ ///
+ /// ```
+ /// # #[allow(dead_code)]
+ /// enum Kind {
+ /// A,
+ /// B,
+ /// C,
+ /// }
+ ///
+ /// impl Default for Kind {
+ /// fn default() -> Self { Kind::A }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn default() -> Self;
+}
+
+/// Return the default value of a type according to the `Default` trait.
+///
+/// The type to return is inferred from context; this is equivalent to
+/// `Default::default()` but shorter to type.
+///
+/// For example:
+/// ```
+/// #![feature(default_free_fn)]
+///
+/// use std::default::default;
+///
+/// #[derive(Default)]
+/// struct AppConfig {
+/// foo: FooConfig,
+/// bar: BarConfig,
+/// }
+///
+/// #[derive(Default)]
+/// struct FooConfig {
+/// foo: i32,
+/// }
+///
+/// #[derive(Default)]
+/// struct BarConfig {
+/// bar: f32,
+/// baz: u8,
+/// }
+///
+/// fn main() {
+/// let options = AppConfig {
+/// foo: default(),
+/// bar: BarConfig {
+/// bar: 10.1,
+/// ..default()
+/// },
+/// };
+/// }
+/// ```
+#[unstable(feature = "default_free_fn", issue = "73014")]
+#[inline]
+pub fn default<T: Default>() -> T {
+ // Plain forwarding; `T` is inferred entirely from the call site.
+ Default::default()
+}
+
+/// Derive macro generating an impl of the trait `Default`.
+// The expansion is produced by the compiler itself (`rustc_builtin_macro`);
+// this item only provides the name, stability, and documentation.
+#[rustc_builtin_macro]
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow_internal_unstable(core_intrinsics)]
+pub macro Default($item:item) {
+ /* compiler built-in */
+}
+
+// Generates `impl Default for $t` returning $v; $doc becomes the rustdoc
+// text shown on the generated `default` method.
+macro_rules! default_impl {
+ ($t:ty, $v:expr, $doc:tt) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Default for $t {
+ #[inline]
+ #[doc = $doc]
+ fn default() -> $t { $v }
+ }
+ }
+}
+
+// Primitive defaults: unit, false, NUL, and numeric zero.
+default_impl! { (), (), "Returns the default value of `()`" }
+default_impl! { bool, false, "Returns the default value of `false`" }
+default_impl! { char, '\x00', "Returns the default value of `\\x00`" }
+
+default_impl! { usize, 0, "Returns the default value of `0`" }
+default_impl! { u8, 0, "Returns the default value of `0`" }
+default_impl! { u16, 0, "Returns the default value of `0`" }
+default_impl! { u32, 0, "Returns the default value of `0`" }
+default_impl! { u64, 0, "Returns the default value of `0`" }
+default_impl! { u128, 0, "Returns the default value of `0`" }
+
+default_impl! { isize, 0, "Returns the default value of `0`" }
+default_impl! { i8, 0, "Returns the default value of `0`" }
+default_impl! { i16, 0, "Returns the default value of `0`" }
+default_impl! { i32, 0, "Returns the default value of `0`" }
+default_impl! { i64, 0, "Returns the default value of `0`" }
+default_impl! { i128, 0, "Returns the default value of `0`" }
+
+default_impl! { f32, 0.0f32, "Returns the default value of `0.0`" }
+default_impl! { f64, 0.0f64, "Returns the default value of `0.0`" }
--- /dev/null
+#![stable(feature = "", since = "1.30.0")]
+#![allow(non_camel_case_types)]
+
+//! Utilities related to foreign function interface (FFI) bindings.
+
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::ops::{Deref, DerefMut};
+
+/// Equivalent to C's `void` type when used as a [pointer].
+///
+/// In essence, `*const c_void` is equivalent to C's `const void*`
+/// and `*mut c_void` is equivalent to C's `void*`. That said, this is
+/// *not* the same as C's `void` return type, which is Rust's `()` type.
+///
+/// To model pointers to opaque types in FFI, until `extern type` is
+/// stabilized, it is recommended to use a newtype wrapper around an empty
+/// byte array. See the [Nomicon] for details.
+///
+/// One could use `std::os::raw::c_void` if they want to support old Rust
+/// compiler down to 1.1.0. After Rust 1.30.0, it was re-exported by
+/// this definition. For more information, please read [RFC 2521].
+///
+/// [pointer]: ../../std/primitive.pointer.html
+/// [Nomicon]: https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs
+/// [RFC 2521]: https://github.com/rust-lang/rfcs/blob/master/text/2521-c_void-reunification.md
+// N.B., for LLVM to recognize the void pointer type and by extension
+// functions like malloc(), we need to have it represented as i8* in
+// LLVM bitcode. The enum used here ensures this and prevents misuse
+// of the "raw" type by only having private variants. We need two
+// variants, because the compiler complains about the repr attribute
+// otherwise and we need at least one variant as otherwise the enum
+// would be uninhabited and at least dereferencing such pointers would
+// be UB.
+// Because both variants are unstable and doc-hidden, external code cannot
+// construct a `c_void` value or exhaustively match on one.
+#[repr(u8)]
+#[stable(feature = "core_c_void", since = "1.30.0")]
+pub enum c_void {
+ #[unstable(
+ feature = "c_void_variant",
+ reason = "temporary implementation detail",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ __variant1,
+ #[unstable(
+ feature = "c_void_variant",
+ reason = "temporary implementation detail",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ __variant2,
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for c_void {
+ // There is no value state to show, so just print the type name;
+ // `pad` makes the output respect any width/alignment format flags.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("c_void")
+ }
+}
+
+/// Basic implementation of a `va_list`.
+// Fallback layout for targets where the C `va_list` is a plain pointer
+// (everything not matched by the aarch64/powerpc/x86_64 register-save
+// layouts below, plus Apple aarch64, wasm, asmjs, and all Windows).
+// The name is WIP, using `VaListImpl` for now.
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_arch = "wasm32",
+ target_arch = "asmjs",
+ windows
+))]
+#[repr(transparent)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ ptr: *mut c_void,
+
+ // Invariant over `'f`, so each `VaListImpl<'f>` object is tied to
+ // the region of the function it's defined in
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+// Debug for the pointer-based layout only: the register-save layouts below
+// `#[derive(Debug)]` instead.
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_arch = "wasm32",
+ target_arch = "asmjs",
+ windows
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> fmt::Debug for VaListImpl<'f> {
+ // Prints the underlying pointer's address, e.g. `va_list* 0x7fff...`.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "va_list* {:p}", self.ptr)
+ }
+}
+
+/// AArch64 ABI implementation of a `va_list`. See the
+/// [AArch64 Procedure Call Standard] for more details.
+///
+/// [AArch64 Procedure Call Standard]:
+/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf
+// `repr(C)` + `lang = "va_list"`: field order and types must exactly mirror
+// the C ABI's `va_list` struct; do not reorder or resize fields.
+#[cfg(all(
+ target_arch = "aarch64",
+ not(any(target_os = "macos", target_os = "ios")),
+ not(windows)
+))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ stack: *mut c_void,
+ gr_top: *mut c_void,
+ vr_top: *mut c_void,
+ gr_offs: i32,
+ vr_offs: i32,
+ // Invariant over `'f`; see the comment on the generic layout above.
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// PowerPC ABI implementation of a `va_list`.
+// `repr(C)` + `lang = "va_list"`: field order and types must exactly mirror
+// the C ABI's `va_list` struct; do not reorder or resize fields.
+#[cfg(all(target_arch = "powerpc", not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gpr: u8,
+ fpr: u8,
+ reserved: u16,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ // Invariant over `'f`; see the comment on the generic layout above.
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// x86_64 ABI implementation of a `va_list`.
+// System V AMD64 layout. `repr(C)` + `lang = "va_list"`: field order and
+// types must exactly mirror the C ABI's `va_list` struct; do not reorder.
+#[cfg(all(target_arch = "x86_64", not(windows)))]
+#[repr(C)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+#[lang = "va_list"]
+pub struct VaListImpl<'f> {
+ gp_offset: i32,
+ fp_offset: i32,
+ overflow_arg_area: *mut c_void,
+ reg_save_area: *mut c_void,
+ // Invariant over `'f`; see the comment on the generic layout above.
+ _marker: PhantomData<&'f mut &'f c_void>,
+}
+
+/// A wrapper for a `va_list`
+// The two `inner` fields are cfg-complements of each other, so exactly one
+// exists on any target: the pointer-based platforms store the `VaListImpl`
+// by value, the register-save platforms store a mutable reference to it.
+// `repr(transparent)` keeps `VaList` ABI-identical to that single field.
+#[repr(transparent)]
+#[derive(Debug)]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+pub struct VaList<'a, 'f: 'a> {
+ #[cfg(any(
+ all(
+ not(target_arch = "aarch64"),
+ not(target_arch = "powerpc"),
+ not(target_arch = "x86_64")
+ ),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_arch = "wasm32",
+ target_arch = "asmjs",
+ windows
+ ))]
+ inner: VaListImpl<'f>,
+
+ #[cfg(all(
+ any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs"),
+ not(windows)
+ ))]
+ inner: &'a mut VaListImpl<'f>,
+
+ _marker: PhantomData<&'a mut VaListImpl<'f>>,
+}
+
+// Pointer-based platforms: `va_list` is a single pointer, so `as_va_list`
+// copies it into the wrapper by value.
+#[cfg(any(
+ all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")),
+ all(target_arch = "aarch64", any(target_os = "macos", target_os = "ios")),
+ target_arch = "wasm32",
+ target_arch = "asmjs",
+ windows
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+ /// Convert a `VaListImpl` into a `VaList` that is binary-compatible with C's `va_list`.
+ #[inline]
+ pub fn as_va_list<'a>(&'a mut self) -> VaList<'a, 'f> {
+ // `{ ..*self }` copies the single `ptr` field into a new value.
+ VaList { inner: VaListImpl { ..*self }, _marker: PhantomData }
+ }
+}
+
+// Register-save platforms: the C `va_list` is a pointer to the struct, so
+// the wrapper stores a mutable reference instead of a copy.
+#[cfg(all(
+ any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"),
+ any(not(target_arch = "aarch64"), not(any(target_os = "macos", target_os = "ios"))),
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs"),
+ not(windows)
+))]
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+ /// Convert a `VaListImpl` into a `VaList` that is binary-compatible with C's `va_list`.
+ #[inline]
+ pub fn as_va_list<'a>(&'a mut self) -> VaList<'a, 'f> {
+ VaList { inner: self, _marker: PhantomData }
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+// Lets a `VaList` be used wherever a `&VaListImpl` is expected; works for
+// both cfg variants of `inner` (value and `&mut` reference) via auto-deref.
+impl<'a, 'f: 'a> Deref for VaList<'a, 'f> {
+ type Target = VaListImpl<'f>;
+
+ #[inline]
+ fn deref(&self) -> &VaListImpl<'f> {
+ &self.inner
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+// Mutable counterpart of the `Deref` impl above.
+impl<'a, 'f: 'a> DerefMut for VaList<'a, 'f> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut VaListImpl<'f> {
+ &mut self.inner
+ }
+}
+
+// The VaArgSafe trait needs to be used in public interfaces, however, the trait
+// itself must not be allowed to be used outside this module. Allowing users to
+// implement the trait for a new type (thereby allowing the va_arg intrinsic to
+// be used on a new type) is likely to cause undefined behavior.
+//
+// FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface
+// but also ensure it cannot be used elsewhere, the trait needs to be public
+// within a private module. Once RFC 2145 has been implemented look into
+// improving this.
+mod sealed_trait {
+ /// Trait which permits the allowed types to be used with [super::VaListImpl::arg].
+ /// Deliberately empty: it is a pure marker, sealed by living in a
+ /// private module so downstream crates cannot implement it.
+ #[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+ )]
+ pub trait VaArgSafe {}
+}
+
+// Marks each listed primitive type as readable via `va_arg`.
+macro_rules! impl_va_arg_safe {
+ ($($t:ty),+) => {
+ $(
+ #[unstable(feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930")]
+ impl sealed_trait::VaArgSafe for $t {}
+ )+
+ }
+}
+
+// NOTE(review): f32 appears to be intentionally absent (C default argument
+// promotion passes `float` as `double`) — confirm against the va_arg
+// intrinsic's supported types before relying on this.
+impl_va_arg_safe! {i8, i16, i32, i64, usize}
+impl_va_arg_safe! {u8, u16, u32, u64, isize}
+impl_va_arg_safe! {f64}
+
+// Raw pointers of any pointee type may be read from a va_list; the pointee
+// is never dereferenced here.
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<T> sealed_trait::VaArgSafe for *mut T {}
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<T> sealed_trait::VaArgSafe for *const T {}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> VaListImpl<'f> {
+ /// Advance to the next arg.
+ ///
+ /// Unsafe because the caller must ensure `T` matches the type of the
+ /// next variadic argument actually passed by the C caller.
+ #[inline]
+ pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T {
+ // SAFETY: the caller must uphold the safety contract for `va_arg`.
+ unsafe { va_arg(self) }
+ }
+
+ /// Copies the `va_list` at the current location.
+ ///
+ /// Runs `f` on a clone of the list positioned at the same argument,
+ /// then `va_end`s the clone; `self`'s position is unaffected.
+ pub unsafe fn with_copy<F, R>(&self, f: F) -> R
+ where
+ F: for<'copy> FnOnce(VaList<'copy, 'f>) -> R,
+ {
+ let mut ap = self.clone();
+ let ret = f(ap.as_va_list());
+ // SAFETY: the caller must uphold the safety contract for `va_end`.
+ unsafe {
+ va_end(&mut ap);
+ }
+ ret
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+// A bitwise copy would be wrong on some ABIs, so `clone` goes through the
+// `va_copy` intrinsic instead of deriving `Clone`.
+impl<'f> Clone for VaListImpl<'f> {
+ #[inline]
+ fn clone(&self) -> Self {
+ let mut dest = crate::mem::MaybeUninit::uninit();
+ // SAFETY: we write to the `MaybeUninit`, thus it is initialized and `assume_init` is legal
+ unsafe {
+ va_copy(dest.as_mut_ptr(), self);
+ dest.assume_init()
+ }
+ }
+}
+
+#[unstable(
+ feature = "c_variadic",
+ reason = "the `c_variadic` feature has not been properly tested on \
+ all supported platforms",
+ issue = "44930"
+)]
+impl<'f> Drop for VaListImpl<'f> {
+ // Intentionally a no-op; the FIXME below explains why `va_end` cannot
+ // be called here and why that is currently harmless.
+ fn drop(&mut self) {
+ // FIXME: this should call `va_end`, but there's no clean way to
+ // guarantee that `drop` always gets inlined into its caller,
+ // so the `va_end` would get directly called from the same function as
+ // the corresponding `va_copy`. `man va_end` states that C requires this,
+ // and LLVM basically follows the C semantics, so we need to make sure
+ // that `va_end` is always called from the same function as `va_copy`.
+ // For more details, see https://github.com/rust-lang/rust/pull/59625
+ // and https://llvm.org/docs/LangRef.html#llvm-va-end-intrinsic.
+ //
+ // This works for now, since `va_end` is a no-op on all current LLVM targets.
+ }
+}
+
+// Compiler intrinsics lowered to the corresponding LLVM `llvm.va_*`
+// operations; they have no Rust implementation.
+extern "rust-intrinsic" {
+ /// Destroy the arglist `ap` after initialization with `va_start` or
+ /// `va_copy`.
+ fn va_end(ap: &mut VaListImpl<'_>);
+
+ /// Copies the current location of arglist `src` to the arglist `dst`.
+ fn va_copy<'f>(dest: *mut VaListImpl<'f>, src: &VaListImpl<'f>);
+
+ /// Loads an argument of type `T` from the `va_list` `ap` and increment the
+ /// argument `ap` points to.
+ fn va_arg<T: sealed_trait::VaArgSafe>(ap: &mut VaListImpl<'_>) -> T;
+}
--- /dev/null
+#![allow(unused_imports)]
+
+use crate::fmt::{self, Debug, Formatter};
+
+struct PadAdapter<'buf, 'state> {
+ buf: &'buf mut (dyn fmt::Write + 'buf),
+ state: &'state mut PadAdapterState,
+}
+
+struct PadAdapterState {
+ on_newline: bool,
+}
+
+impl Default for PadAdapterState {
+ fn default() -> Self {
+ PadAdapterState { on_newline: true }
+ }
+}
+
+impl<'buf, 'state> PadAdapter<'buf, 'state> {
+ // Returns a Formatter whose writes are routed through a PadAdapter.
+ // `slot` is caller-provided storage that keeps the adapter alive for as
+ // long as the returned Formatter borrows it.
+ fn wrap<'slot, 'fmt: 'buf + 'slot>(
+ fmt: &'fmt mut fmt::Formatter<'_>,
+ slot: &'slot mut Option<Self>,
+ state: &'state mut PadAdapterState,
+ ) -> fmt::Formatter<'slot> {
+ fmt.wrap_buf(move |buf| {
+ *slot = Some(PadAdapter { buf, state });
+ slot.as_mut().unwrap()
+ })
+ }
+}
+
+impl fmt::Write for PadAdapter<'_, '_> {
+ // Forwards `s` to the inner writer, inserting the indent string at the
+ // start of each line. Works chunk-by-chunk: each iteration emits up to
+ // and including the next '\n' (or the rest of the string if none).
+ fn write_str(&mut self, mut s: &str) -> fmt::Result {
+ while !s.is_empty() {
+ // A previous chunk ended with '\n', so indent before this one.
+ if self.state.on_newline {
+ self.buf.write_str(" ")?;
+ }
+
+ let split = match s.find('\n') {
+ Some(pos) => {
+ self.state.on_newline = true;
+ // Include the '\n' itself in the current chunk.
+ pos + 1
+ }
+ None => {
+ self.state.on_newline = false;
+ s.len()
+ }
+ };
+ self.buf.write_str(&s[..split])?;
+ s = &s[split..];
+ }
+
+ Ok(())
+ }
+}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted struct as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_struct`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo {
+/// bar: i32,
+/// baz: String,
+/// }
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_struct("Foo")
+/// .field("bar", &self.bar)
+/// .field("baz", &self.baz)
+/// .finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }),
+/// "Foo { bar: 10, baz: \"Hello World\" }",
+/// );
+/// ```
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
pub struct DebugStruct<'a, 'b: 'a> {
    // Formatter the output is written into.
    fmt: &'a mut fmt::Formatter<'b>,
    // First error encountered, if any; once `Err`, later calls are no-ops.
    result: fmt::Result,
    // Whether at least one field was emitted (controls braces/separators).
    has_fields: bool,
}
+
+pub(super) fn debug_struct_new<'a, 'b>(
+ fmt: &'a mut fmt::Formatter<'b>,
+ name: &str,
+) -> DebugStruct<'a, 'b> {
+ let result = fmt.write_str(name);
+ DebugStruct { fmt, result, has_fields: false }
+}
+
impl<'a, 'b: 'a> DebugStruct<'a, 'b> {
    /// Adds a new field to the generated struct output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Bar {
    ///     bar: i32,
    ///     another: String,
    /// }
    ///
    /// impl fmt::Debug for Bar {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_struct("Bar")
    ///             .field("bar", &self.bar) // We add `bar` field.
    ///             .field("another", &self.another) // We add `another` field.
    ///             // We even add a field which doesn't exist (because why not?).
    ///             .field("not_existing_field", &1)
    ///             .finish() // We're good to go!
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Bar { bar: 10, another: "Hello World".to_string() }),
    ///     "Bar { bar: 10, another: \"Hello World\", not_existing_field: 1 }",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut Self {
        self.result = self.result.and_then(|_| {
            if self.is_pretty() {
                // Pretty (`{:#?}`) mode: one indented `name: value,` line per
                // field, written through a `PadAdapter`.
                if !self.has_fields {
                    // The first field opens the brace block.
                    self.fmt.write_str(" {\n")?;
                }
                let mut slot = None;
                let mut state = Default::default();
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut state);
                writer.write_str(name)?;
                writer.write_str(": ")?;
                value.fmt(&mut writer)?;
                writer.write_str(",\n")
            } else {
                // Compact mode: ` { ` before the first field, `, ` between fields.
                let prefix = if self.has_fields { ", " } else { " { " };
                self.fmt.write_str(prefix)?;
                self.fmt.write_str(name)?;
                self.fmt.write_str(": ")?;
                value.fmt(self.fmt)
            }
        });

        self.has_fields = true;
        self
    }

    /// Marks the struct as non-exhaustive, indicating to the reader that there are some other
    /// fields that are not shown in the debug representation.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(debug_non_exhaustive)]
    /// use std::fmt;
    ///
    /// struct Bar {
    ///     bar: i32,
    ///     hidden: f32,
    /// }
    ///
    /// impl fmt::Debug for Bar {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_struct("Bar")
    ///             .field("bar", &self.bar)
    ///             .finish_non_exhaustive() // Show that some other field(s) exist.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Bar { bar: 10, hidden: 1.0 }),
    ///     "Bar { bar: 10, .. }",
    /// );
    /// ```
    #[unstable(feature = "debug_non_exhaustive", issue = "67364")]
    pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
        self.result = self.result.and_then(|_| {
            // Draw non-exhaustive dots (`..`), and open brace if necessary (no fields).
            if self.is_pretty() {
                if !self.has_fields {
                    self.fmt.write_str(" {\n")?;
                }
                let mut slot = None;
                let mut state = Default::default();
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut state);
                writer.write_str("..\n")?;
            } else {
                if self.has_fields {
                    self.fmt.write_str(", ..")?;
                } else {
                    self.fmt.write_str(" { ..")?;
                }
            }
            // Close the brace; pretty mode already ended at column 0, compact
            // mode still needs the separating space.
            if self.is_pretty() {
                self.fmt.write_str("}")?
            } else {
                self.fmt.write_str(" }")?;
            }
            Ok(())
        });
        self.result
    }

    /// Finishes output and returns any error encountered.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Bar {
    ///     bar: i32,
    ///     baz: String,
    /// }
    ///
    /// impl fmt::Debug for Bar {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_struct("Bar")
    ///             .field("bar", &self.bar)
    ///             .field("baz", &self.baz)
    ///             .finish() // You need to call it to "finish" the
    ///                       // struct formatting.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Bar { bar: 10, baz: "Hello World".to_string() }),
    ///     "Bar { bar: 10, baz: \"Hello World\" }",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn finish(&mut self) -> fmt::Result {
        // With no fields the output is just the struct name, so there is no
        // brace to close.
        if self.has_fields {
            self.result = self.result.and_then(|_| {
                if self.is_pretty() { self.fmt.write_str("}") } else { self.fmt.write_str(" }") }
            });
        }
        self.result
    }

    // `{:#?}` (alternate flag) selects the multi-line pretty layout.
    fn is_pretty(&self) -> bool {
        self.fmt.alternate()
    }
}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted tuple as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_tuple`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(i32, String);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_tuple("Foo")
+/// .field(&self.0)
+/// .field(&self.1)
+/// .finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(10, "Hello World".to_string())),
+/// "Foo(10, \"Hello World\")",
+/// );
+/// ```
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
pub struct DebugTuple<'a, 'b: 'a> {
    // Formatter the output is written into.
    fmt: &'a mut fmt::Formatter<'b>,
    // First error encountered, if any; once `Err`, later calls are no-ops.
    result: fmt::Result,
    // Number of fields emitted so far (controls parens/separators).
    fields: usize,
    // `true` for a bare tuple (no name); a 1-element bare tuple needs a
    // trailing comma: `(1,)`.
    empty_name: bool,
}
+
+pub(super) fn debug_tuple_new<'a, 'b>(
+ fmt: &'a mut fmt::Formatter<'b>,
+ name: &str,
+) -> DebugTuple<'a, 'b> {
+ let result = fmt.write_str(name);
+ DebugTuple { fmt, result, fields: 0, empty_name: name.is_empty() }
+}
+
impl<'a, 'b: 'a> DebugTuple<'a, 'b> {
    /// Adds a new field to the generated tuple struct output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(i32, String);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_tuple("Foo")
    ///             .field(&self.0) // We add the first field.
    ///             .field(&self.1) // We add the second field.
    ///             .finish() // We're good to go!
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(10, "Hello World".to_string())),
    ///     "Foo(10, \"Hello World\")",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut Self {
        self.result = self.result.and_then(|_| {
            if self.is_pretty() {
                // Pretty mode: one indented `value,` line per field.
                if self.fields == 0 {
                    // First field opens the paren block.
                    self.fmt.write_str("(\n")?;
                }
                let mut slot = None;
                let mut state = Default::default();
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut state);
                value.fmt(&mut writer)?;
                writer.write_str(",\n")
            } else {
                // Compact mode: `(` before the first field, `, ` between fields.
                let prefix = if self.fields == 0 { "(" } else { ", " };
                self.fmt.write_str(prefix)?;
                value.fmt(self.fmt)
            }
        });

        self.fields += 1;
        self
    }

    /// Finishes output and returns any error encountered.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(i32, String);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_tuple("Foo")
    ///             .field(&self.0)
    ///             .field(&self.1)
    ///             .finish() // You need to call it to "finish" the
    ///                       // tuple formatting.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(10, "Hello World".to_string())),
    ///     "Foo(10, \"Hello World\")",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn finish(&mut self) -> fmt::Result {
        // With no fields the output is just the name (no parens at all).
        if self.fields > 0 {
            self.result = self.result.and_then(|_| {
                // A compact 1-element bare tuple needs a trailing comma so it
                // reads as a tuple: `(1,)`.
                if self.fields == 1 && self.empty_name && !self.is_pretty() {
                    self.fmt.write_str(",")?;
                }
                self.fmt.write_str(")")
            });
        }
        self.result
    }

    // `{:#?}` (alternate flag) selects the multi-line pretty layout.
    fn is_pretty(&self) -> bool {
        self.fmt.alternate()
    }
}
+
/// Shared implementation behind `DebugSet` and `DebugList`, which differ only
/// in their bracketing characters.
struct DebugInner<'a, 'b: 'a> {
    // Formatter the output is written into.
    fmt: &'a mut fmt::Formatter<'b>,
    // First error encountered, if any; once `Err`, later calls are no-ops.
    result: fmt::Result,
    // Whether at least one entry was emitted (controls separators).
    has_fields: bool,
}
+
impl<'a, 'b: 'a> DebugInner<'a, 'b> {
    /// Writes one entry, with layout depending on the pretty (`{:#?}`) flag.
    /// The opening bracket has already been written by the constructor.
    fn entry(&mut self, entry: &dyn fmt::Debug) {
        self.result = self.result.and_then(|_| {
            if self.is_pretty() {
                // Pretty mode: one indented `entry,` line per entry.
                if !self.has_fields {
                    // First entry moves past the opening bracket's line.
                    self.fmt.write_str("\n")?;
                }
                let mut slot = None;
                let mut state = Default::default();
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut state);
                entry.fmt(&mut writer)?;
                writer.write_str(",\n")
            } else {
                // Compact mode: `, ` between entries.
                if self.has_fields {
                    self.fmt.write_str(", ")?
                }
                entry.fmt(self.fmt)
            }
        });

        self.has_fields = true;
    }

    // `{:#?}` (alternate flag) selects the multi-line pretty layout.
    fn is_pretty(&self) -> bool {
        self.fmt.alternate()
    }
}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted set of items as a part
+/// of your [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_set`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<i32>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_set().entries(self.0.iter()).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![10, 11])),
+/// "{10, 11}",
+/// );
+/// ```
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
pub struct DebugSet<'a, 'b: 'a> {
    // All real work is delegated to the shared set/list implementation.
    inner: DebugInner<'a, 'b>,
}
+
+pub(super) fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b> {
+ let result = fmt.write_str("{");
+ DebugSet { inner: DebugInner { fmt, result, has_fields: false } }
+}
+
impl<'a, 'b: 'a> DebugSet<'a, 'b> {
    /// Adds a new entry to the set output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>, Vec<u32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_set()
    ///             .entry(&self.0) // Adds the first "entry".
    ///             .entry(&self.1) // Adds the second "entry".
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
    ///     "{[10, 11], [12, 13]}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
        // Layout is handled entirely by the shared inner builder.
        self.inner.entry(entry);
        self
    }

    /// Adds the contents of an iterator of entries to the set output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>, Vec<u32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_set()
    ///             .entries(self.0.iter()) // Adds the first "entry".
    ///             .entries(self.1.iter()) // Adds the second "entry".
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
    ///     "{10, 11, 12, 13}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
    where
        D: fmt::Debug,
        I: IntoIterator<Item = D>,
    {
        for entry in entries {
            self.entry(&entry);
        }
        self
    }

    /// Finishes output and returns any error encountered.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_set()
    ///             .entries(self.0.iter())
    ///             .finish() // Ends the struct formatting.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11])),
    ///     "{10, 11}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn finish(&mut self) -> fmt::Result {
        // Close the brace opened by `debug_set_new`.
        self.inner.result.and_then(|_| self.inner.fmt.write_str("}"))
    }
}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted list of items as a part
+/// of your [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_list`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<i32>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_list().entries(self.0.iter()).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![10, 11])),
+/// "[10, 11]",
+/// );
+/// ```
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
pub struct DebugList<'a, 'b: 'a> {
    // All real work is delegated to the shared set/list implementation.
    inner: DebugInner<'a, 'b>,
}
+
+pub(super) fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, 'b> {
+ let result = fmt.write_str("[");
+ DebugList { inner: DebugInner { fmt, result, has_fields: false } }
+}
+
impl<'a, 'b: 'a> DebugList<'a, 'b> {
    /// Adds a new entry to the list output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>, Vec<u32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_list()
    ///             .entry(&self.0) // We add the first "entry".
    ///             .entry(&self.1) // We add the second "entry".
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
    ///     "[[10, 11], [12, 13]]",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
        // Layout is handled entirely by the shared inner builder.
        self.inner.entry(entry);
        self
    }

    /// Adds the contents of an iterator of entries to the list output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>, Vec<u32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_list()
    ///             .entries(self.0.iter())
    ///             .entries(self.1.iter())
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11], vec![12, 13])),
    ///     "[10, 11, 12, 13]",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
    where
        D: fmt::Debug,
        I: IntoIterator<Item = D>,
    {
        for entry in entries {
            self.entry(&entry);
        }
        self
    }

    /// Finishes output and returns any error encountered.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<i32>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_list()
    ///             .entries(self.0.iter())
    ///             .finish() // Ends the struct formatting.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![10, 11])),
    ///     "[10, 11]",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn finish(&mut self) -> fmt::Result {
        // Close the bracket opened by `debug_list_new`.
        self.inner.result.and_then(|_| self.inner.fmt.write_str("]"))
    }
}
+
+/// A struct to help with [`fmt::Debug`](Debug) implementations.
+///
+/// This is useful when you wish to output a formatted map as a part of your
+/// [`Debug::fmt`] implementation.
+///
+/// This can be constructed by the [`Formatter::debug_map`] method.
+///
+/// # Examples
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Foo(Vec<(String, i32)>);
+///
+/// impl fmt::Debug for Foo {
+/// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+/// fmt.debug_map().entries(self.0.iter().map(|&(ref k, ref v)| (k, v))).finish()
+/// }
+/// }
+///
+/// assert_eq!(
+/// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+/// "{\"A\": 10, \"B\": 11}",
+/// );
+/// ```
#[must_use = "must eventually call `finish()` on Debug builders"]
#[allow(missing_debug_implementations)]
#[stable(feature = "debug_builders", since = "1.2.0")]
pub struct DebugMap<'a, 'b: 'a> {
    // Formatter the output is written into.
    fmt: &'a mut fmt::Formatter<'b>,
    // First error encountered, if any; once `Err`, later calls are no-ops.
    result: fmt::Result,
    // Whether at least one complete entry was emitted (controls separators).
    has_fields: bool,
    // `true` between a `key` call and its matching `value` call; used to
    // enforce the key-then-value protocol with assertions.
    has_key: bool,
    // The state of newlines is tracked between keys and values
    state: PadAdapterState,
}
+
+pub(super) fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b> {
+ let result = fmt.write_str("{");
+ DebugMap { fmt, result, has_fields: false, has_key: false, state: Default::default() }
+}
+
impl<'a, 'b: 'a> DebugMap<'a, 'b> {
    /// Adds a new entry to the map output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<(String, i32)>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_map()
    ///             .entry(&"whole", &self.0) // We add the "whole" entry.
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
    ///     "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entry(&mut self, key: &dyn fmt::Debug, value: &dyn fmt::Debug) -> &mut Self {
        // A complete entry is just key-then-value.
        self.key(key).value(value)
    }

    /// Adds the key part of a new entry to the map output.
    ///
    /// This method, together with `value`, is an alternative to `entry` that
    /// can be used when the complete entry isn't known upfront. Prefer the `entry`
    /// method when it's possible to use.
    ///
    /// # Panics
    ///
    /// `key` must be called before `value` and each call to `key` must be followed
    /// by a corresponding call to `value`. Otherwise this method will panic.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<(String, i32)>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_map()
    ///             .key(&"whole").value(&self.0) // We add the "whole" entry.
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
    ///     "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
    /// );
    /// ```
    #[stable(feature = "debug_map_key_value", since = "1.42.0")]
    pub fn key(&mut self, key: &dyn fmt::Debug) -> &mut Self {
        self.result = self.result.and_then(|_| {
            // Protocol check: the previous key must have received its value.
            assert!(
                !self.has_key,
                "attempted to begin a new map entry \
                 without completing the previous one"
            );

            if self.is_pretty() {
                // Pretty mode: `key: ` on a fresh indented line; `self.state`
                // carries the line position over to the `value` call.
                if !self.has_fields {
                    self.fmt.write_str("\n")?;
                }
                let mut slot = None;
                self.state = Default::default();
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut self.state);
                key.fmt(&mut writer)?;
                writer.write_str(": ")?;
            } else {
                // Compact mode: `, ` between entries, then `key: `.
                if self.has_fields {
                    self.fmt.write_str(", ")?
                }
                key.fmt(self.fmt)?;
                self.fmt.write_str(": ")?;
            }

            self.has_key = true;
            Ok(())
        });

        self
    }

    /// Adds the value part of a new entry to the map output.
    ///
    /// This method, together with `key`, is an alternative to `entry` that
    /// can be used when the complete entry isn't known upfront. Prefer the `entry`
    /// method when it's possible to use.
    ///
    /// # Panics
    ///
    /// `key` must be called before `value` and each call to `key` must be followed
    /// by a corresponding call to `value`. Otherwise this method will panic.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<(String, i32)>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_map()
    ///             .key(&"whole").value(&self.0) // We add the "whole" entry.
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
    ///     "{\"whole\": [(\"A\", 10), (\"B\", 11)]}",
    /// );
    /// ```
    #[stable(feature = "debug_map_key_value", since = "1.42.0")]
    pub fn value(&mut self, value: &dyn fmt::Debug) -> &mut Self {
        self.result = self.result.and_then(|_| {
            // Protocol check: a key must precede every value.
            assert!(self.has_key, "attempted to format a map value before its key");

            if self.is_pretty() {
                // Continue on the line started by `key`, reusing its
                // indentation state.
                let mut slot = None;
                let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot, &mut self.state);
                value.fmt(&mut writer)?;
                writer.write_str(",\n")?;
            } else {
                value.fmt(self.fmt)?;
            }

            self.has_key = false;
            Ok(())
        });

        self.has_fields = true;
        self
    }

    /// Adds the contents of an iterator of entries to the map output.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<(String, i32)>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_map()
    ///             // We map our vec so each entries' first field will become
    ///             // the "key".
    ///             .entries(self.0.iter().map(|&(ref k, ref v)| (k, v)))
    ///             .finish()
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
    ///     "{\"A\": 10, \"B\": 11}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn entries<K, V, I>(&mut self, entries: I) -> &mut Self
    where
        K: fmt::Debug,
        V: fmt::Debug,
        I: IntoIterator<Item = (K, V)>,
    {
        for (k, v) in entries {
            self.entry(&k, &v);
        }
        self
    }

    /// Finishes output and returns any error encountered.
    ///
    /// # Panics
    ///
    /// `key` must be called before `value` and each call to `key` must be followed
    /// by a corresponding call to `value`. Otherwise this method will panic.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Foo(Vec<(String, i32)>);
    ///
    /// impl fmt::Debug for Foo {
    ///     fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         fmt.debug_map()
    ///             .entries(self.0.iter().map(|&(ref k, ref v)| (k, v)))
    ///             .finish() // Ends the struct formatting.
    ///     }
    /// }
    ///
    /// assert_eq!(
    ///     format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
    ///     "{\"A\": 10, \"B\": 11}",
    /// );
    /// ```
    #[stable(feature = "debug_builders", since = "1.2.0")]
    pub fn finish(&mut self) -> fmt::Result {
        self.result.and_then(|_| {
            // Protocol check: the last key must have received its value.
            assert!(!self.has_key, "attempted to finish a map with a partial entry");

            self.fmt.write_str("}")
        })
    }

    // `{:#?}` (alternate flag) selects the multi-line pretty layout.
    fn is_pretty(&self) -> bool {
        self.fmt.alternate()
    }
}
--- /dev/null
+use crate::fmt::{Debug, Display, Formatter, LowerExp, Result, UpperExp};
+use crate::mem::MaybeUninit;
+use crate::num::flt2dec;
+
// Don't inline this so callers don't use the stack space this function
// requires unless they have to.
/// Formats `num` in decimal notation with exactly `precision` fractional
/// digits, then applies the formatter's padding/alignment flags.
#[inline(never)]
fn float_to_decimal_common_exact<T>(
    fmt: &mut Formatter<'_>,
    num: &T,
    sign: flt2dec::Sign,
    precision: usize,
) -> Result
where
    T: flt2dec::DecodableFloat,
{
    // Scratch buffers; `flt2dec` initializes only the parts it uses.
    let mut buf: [MaybeUninit<u8>; 1024] = MaybeUninit::uninit_array(); // enough for f32 and f64
    let mut parts: [MaybeUninit<flt2dec::Part<'_>>; 4] = MaybeUninit::uninit_array();
    let formatted = flt2dec::to_exact_fixed_str(
        flt2dec::strategy::grisu::format_exact,
        *num,
        sign,
        precision,
        &mut buf,
        &mut parts,
    );
    fmt.pad_formatted_parts(&formatted)
}
+
// Don't inline this so callers that call both this and the above won't wind
// up using the combined stack space of both functions in some cases.
/// Formats `num` as the shortest decimal string that round-trips, with at
/// least `precision` fractional digits, then applies the formatter's
/// padding/alignment flags.
#[inline(never)]
fn float_to_decimal_common_shortest<T>(
    fmt: &mut Formatter<'_>,
    num: &T,
    sign: flt2dec::Sign,
    precision: usize,
) -> Result
where
    T: flt2dec::DecodableFloat,
{
    // enough for f32 and f64
    let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] = MaybeUninit::uninit_array();
    let mut parts: [MaybeUninit<flt2dec::Part<'_>>; 4] = MaybeUninit::uninit_array();
    let formatted = flt2dec::to_shortest_str(
        flt2dec::strategy::grisu::format_shortest,
        *num,
        sign,
        precision,
        &mut buf,
        &mut parts,
    );
    fmt.pad_formatted_parts(&formatted)
}
+
+// Common code of floating point Debug and Display.
+fn float_to_decimal_common<T>(
+ fmt: &mut Formatter<'_>,
+ num: &T,
+ negative_zero: bool,
+ min_precision: usize,
+) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let force_sign = fmt.sign_plus();
+ let sign = match (force_sign, negative_zero) {
+ (false, false) => flt2dec::Sign::Minus,
+ (false, true) => flt2dec::Sign::MinusRaw,
+ (true, false) => flt2dec::Sign::MinusPlus,
+ (true, true) => flt2dec::Sign::MinusPlusRaw,
+ };
+
+ if let Some(precision) = fmt.precision {
+ float_to_decimal_common_exact(fmt, num, sign, precision)
+ } else {
+ float_to_decimal_common_shortest(fmt, num, sign, min_precision)
+ }
+}
+
// Don't inline this so callers don't use the stack space this function
// requires unless they have to.
/// Formats `num` in exponential notation with exactly `precision` significant
/// digits (`upper` selects `E` over `e`), then applies the formatter's
/// padding/alignment flags.
#[inline(never)]
fn float_to_exponential_common_exact<T>(
    fmt: &mut Formatter<'_>,
    num: &T,
    sign: flt2dec::Sign,
    precision: usize,
    upper: bool,
) -> Result
where
    T: flt2dec::DecodableFloat,
{
    // Scratch buffers; `flt2dec` initializes only the parts it uses.
    let mut buf: [MaybeUninit<u8>; 1024] = MaybeUninit::uninit_array(); // enough for f32 and f64
    let mut parts: [MaybeUninit<flt2dec::Part<'_>>; 6] = MaybeUninit::uninit_array();
    let formatted = flt2dec::to_exact_exp_str(
        flt2dec::strategy::grisu::format_exact,
        *num,
        sign,
        precision,
        upper,
        &mut buf,
        &mut parts,
    );
    fmt.pad_formatted_parts(&formatted)
}
+
// Don't inline this so callers that call both this and the above won't wind
// up using the combined stack space of both functions in some cases.
/// Formats `num` as the shortest exponential string that round-trips
/// (`upper` selects `E` over `e`), then applies the formatter's
/// padding/alignment flags.
#[inline(never)]
fn float_to_exponential_common_shortest<T>(
    fmt: &mut Formatter<'_>,
    num: &T,
    sign: flt2dec::Sign,
    upper: bool,
) -> Result
where
    T: flt2dec::DecodableFloat,
{
    // enough for f32 and f64
    let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] = MaybeUninit::uninit_array();
    let mut parts: [MaybeUninit<flt2dec::Part<'_>>; 6] = MaybeUninit::uninit_array();
    // `(0, 0)` means no exponent range is excluded: always use exponential form.
    let formatted = flt2dec::to_shortest_exp_str(
        flt2dec::strategy::grisu::format_shortest,
        *num,
        sign,
        (0, 0),
        upper,
        &mut buf,
        &mut parts,
    );
    fmt.pad_formatted_parts(&formatted)
}
+
+// Common code of floating point LowerExp and UpperExp.
+fn float_to_exponential_common<T>(fmt: &mut Formatter<'_>, num: &T, upper: bool) -> Result
+where
+ T: flt2dec::DecodableFloat,
+{
+ let force_sign = fmt.sign_plus();
+ let sign = match force_sign {
+ false => flt2dec::Sign::Minus,
+ true => flt2dec::Sign::MinusPlus,
+ };
+
+ if let Some(precision) = fmt.precision {
+ // 1 integral digit + `precision` fractional digits = `precision + 1` total digits
+ float_to_exponential_common_exact(fmt, num, sign, precision + 1, upper)
+ } else {
+ float_to_exponential_common_shortest(fmt, num, sign, upper)
+ }
+}
+
// Implements `Debug`, `Display`, `LowerExp` and `UpperExp` for a primitive
// float type by delegating to the common helpers above. `Debug` preserves the
// sign of `-0.0` and always shows at least one fractional digit; `Display`
// does neither.
macro_rules! floating {
    ($ty:ident) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl Debug for $ty {
            fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
                // negative_zero = true, min_precision = 1: `-0.0` prints as "-0.0".
                float_to_decimal_common(fmt, self, true, 1)
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl Display for $ty {
            fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
                // negative_zero = false, min_precision = 0: `-0.0` prints as "0".
                float_to_decimal_common(fmt, self, false, 0)
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl LowerExp for $ty {
            fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
                float_to_exponential_common(fmt, self, false)
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl UpperExp for $ty {
            fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
                float_to_exponential_common(fmt, self, true)
            }
        }
    };
}
+
// Instantiate all four formatting impls for both primitive float widths.
floating! { f32 }
floating! { f64 }
--- /dev/null
+//! Utilities for formatting and printing strings.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cell::{Cell, Ref, RefCell, RefMut, UnsafeCell};
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::num::flt2dec;
+use crate::ops::Deref;
+use crate::result;
+use crate::str;
+
+mod builders;
+mod float;
+mod num;
+
+#[stable(feature = "fmt_flags_align", since = "1.28.0")]
+/// Possible alignments returned by `Formatter::align`
+#[derive(Debug)]
+pub enum Alignment {
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be left-aligned.
+ Left,
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be right-aligned.
+ Right,
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ /// Indication that contents should be center-aligned.
+ Center,
+}
+
+#[stable(feature = "debug_builders", since = "1.2.0")]
+pub use self::builders::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+
+#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
+#[doc(hidden)]
+pub mod rt {
+ pub mod v1;
+}
+
/// The type returned by formatter methods.
///
/// This is a [`result::Result`] with the success value fixed to `()` and the
/// error fixed to [`Error`]; formatting either fully succeeds or reports an
/// opaque failure.
///
/// # Examples
///
/// ```
/// use std::fmt;
///
/// #[derive(Debug)]
/// struct Triangle {
///     a: f32,
///     b: f32,
///     c: f32
/// }
///
/// impl fmt::Display for Triangle {
///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
///         write!(f, "({}, {}, {})", self.a, self.b, self.c)
///     }
/// }
///
/// let pythagorean_triple = Triangle { a: 3.0, b: 4.0, c: 5.0 };
///
/// assert_eq!(format!("{}", pythagorean_triple), "(3, 4, 5)");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub type Result = result::Result<(), Error>;
+
/// The error type which is returned from formatting a message into a stream.
///
/// This type does not support transmission of an error other than that an error
/// occurred. Any extra information must be arranged to be transmitted through
/// some other means.
///
/// An important thing to remember is that the type `fmt::Error` should not be
/// confused with [`std::io::Error`] or [`std::error::Error`], which you may also
/// have in scope.
///
/// [`std::io::Error`]: ../../std/io/struct.Error.html
/// [`std::error::Error`]: ../../std/error/trait.Error.html
///
/// # Examples
///
/// ```rust
/// use std::fmt::{self, write};
///
/// let mut output = String::new();
/// if let Err(fmt::Error) = write(&mut output, format_args!("Hello {}!", "world")) {
///     panic!("An error occurred");
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
// Deliberately a zero-sized unit struct: it carries no information beyond
// "formatting failed".
pub struct Error;
+
+/// A trait for writing or formatting into Unicode-accepting buffers or streams.
+///
+/// This trait only accepts UTF-8–encoded data and is not [flushable]. If you only
+/// want to accept Unicode and you don't need flushing, you should implement this trait;
+/// otherwise you should implement [`std::io::Write`].
+///
+/// [`std::io::Write`]: ../../std/io/trait.Write.html
+/// [flushable]: ../../std/io/trait.Write.html#tymethod.flush
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Write {
    /// Writes a string slice into this writer, returning whether the write
    /// succeeded.
    ///
    /// This method can only succeed if the entire string slice was successfully
    /// written, and this method will not return until all data has been
    /// written or an error occurs.
    ///
    /// # Errors
    ///
    /// This function will return an instance of [`Error`] on error.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt::{Error, Write};
    ///
    /// fn writer<W: Write>(f: &mut W, s: &str) -> Result<(), Error> {
    ///     f.write_str(s)
    /// }
    ///
    /// let mut buf = String::new();
    /// writer(&mut buf, "hola").unwrap();
    /// assert_eq!(&buf, "hola");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn write_str(&mut self, s: &str) -> Result;

    /// Writes a [`char`] into this writer, returning whether the write succeeded.
    ///
    /// A single [`char`] may be encoded as more than one byte.
    /// This method can only succeed if the entire byte sequence was successfully
    /// written, and this method will not return until all data has been
    /// written or an error occurs.
    ///
    /// # Errors
    ///
    /// This function will return an instance of [`Error`] on error.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt::{Error, Write};
    ///
    /// fn writer<W: Write>(f: &mut W, c: char) -> Result<(), Error> {
    ///     f.write_char(c)
    /// }
    ///
    /// let mut buf = String::new();
    /// writer(&mut buf, 'a').unwrap();
    /// writer(&mut buf, 'b').unwrap();
    /// assert_eq!(&buf, "ab");
    /// ```
    #[stable(feature = "fmt_write_char", since = "1.1.0")]
    fn write_char(&mut self, c: char) -> Result {
        // A `char` occupies at most 4 bytes in UTF-8, so a 4-byte stack
        // buffer always suffices for the encoded form; delegate to
        // `write_str` on that encoding.
        self.write_str(c.encode_utf8(&mut [0; 4]))
    }

    /// Glue for usage of the [`write!`] macro with implementors of this trait.
    ///
    /// This method should generally not be invoked manually, but rather through
    /// the [`write!`] macro itself.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt::{Error, Write};
    ///
    /// fn writer<W: Write>(f: &mut W, s: &str) -> Result<(), Error> {
    ///     f.write_fmt(format_args!("{}", s))
    /// }
    ///
    /// let mut buf = String::new();
    /// writer(&mut buf, "world").unwrap();
    /// assert_eq!(&buf, "world");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn write_fmt(mut self: &mut Self, args: Arguments<'_>) -> Result {
        // `mut self: &mut Self` lets us take `&mut self` (an `&mut &mut Self`)
        // below; that reborrow implements `Write` through the blanket impl for
        // `&mut W`, so it coerces to `&mut dyn Write` without `Self: Sized`.
        write(&mut self, args)
    }
}
+
#[stable(feature = "fmt_write_blanket_impl", since = "1.4.0")]
// Forwarding impl: a `&mut W` is itself a writer, with each method delegating
// through one level of indirection. This is what allows `write!(&mut w, ...)`
// and the `&mut self` reborrow in `Write::write_fmt` to work.
impl<W: Write + ?Sized> Write for &mut W {
    fn write_str(&mut self, s: &str) -> Result {
        (**self).write_str(s)
    }

    fn write_char(&mut self, c: char) -> Result {
        (**self).write_char(c)
    }

    fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
        (**self).write_fmt(args)
    }
}
+
+/// Configuration for formatting.
+///
+/// A `Formatter` represents various options related to formatting. Users do not
+/// construct `Formatter`s directly; a mutable reference to one is passed to
+/// the `fmt` method of all formatting traits, like [`Debug`] and [`Display`].
+///
+/// To interact with a `Formatter`, you'll call various methods to change the
+/// various options related to formatting. For examples, please see the
+/// documentation of the methods defined on `Formatter` below.
#[allow(missing_debug_implementations)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Formatter<'a> {
    // Per-placeholder formatting options; reset/overwritten by `run` for each
    // spec when a format string carries explicit parameters.
    flags: u32,                // bit flags (see `FlagV1`)
    fill: char,                // character used for padding
    align: rt::v1::Alignment,  // requested alignment within the field
    width: Option<usize>,      // minimum field width, if specified
    precision: Option<usize>,  // precision, if specified

    // The output sink that all writes are ultimately forwarded to.
    buf: &'a mut (dyn Write + 'a),
}
+
+// NB. Argument is essentially an optimized partially applied formatting function,
+// equivalent to `exists T.(&T, fn(&T, &mut Formatter<'_>) -> Result`.
+
extern "C" {
    // An extern type: opaque and unsized, impossible to construct or inspect
    // from Rust. Used purely as the type-erased pointee behind
    // `ArgumentV1::value`.
    type Opaque;
}
+
+/// This struct represents the generic "argument" which is taken by the Xprintf
+/// family of functions. It contains a function to format the given value. At
+/// compile time it is ensured that the function and the value have the correct
+/// types, and then this struct is used to canonicalize arguments to one type.
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)]
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
#[doc(hidden)]
pub struct ArgumentV1<'a> {
    // Type-erased reference to the value being formatted.
    value: &'a Opaque,
    // Formatting function selected at compile time for the value's concrete
    // type; only ever invoked with the matching `value` above.
    formatter: fn(&Opaque, &mut Formatter<'_>) -> Result,
}
+
+// This guarantees a single stable value for the function pointer associated with
+// indices/counts in the formatting infrastructure.
+//
+// Note that a function defined as such would not be correct as functions are
+// always tagged unnamed_addr with the current lowering to LLVM IR, so their
+// address is not considered important to LLVM and as such the as_usize cast
+// could have been miscompiled. In practice, we never call as_usize on non-usize
+// containing data (as a matter of static generation of the formatting
+// arguments), so this is merely an additional check.
+//
+// We primarily want to ensure that the function pointer at `USIZE_MARKER` has
+// an address corresponding *only* to functions that also take `&usize` as their
+// first argument. The read_volatile here ensures that we can safely ready out a
+// usize from the passed reference and that this address does not point at a
+// non-usize taking function.
#[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
static USIZE_MARKER: fn(&usize, &mut Formatter<'_>) -> Result = |ptr, _| {
    // SAFETY: ptr is a reference
    let _v: usize = unsafe { crate::ptr::read_volatile(ptr) };
    // This closure exists only so its address can be compared against in
    // `ArgumentV1::as_usize`; it is never actually called to format anything,
    // so it never needs to produce a value.
    loop {}
};
+
impl<'a> ArgumentV1<'a> {
    #[doc(hidden)]
    #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
    pub fn new<'b, T>(x: &'b T, f: fn(&T, &mut Formatter<'_>) -> Result) -> ArgumentV1<'b> {
        // SAFETY: `mem::transmute(x)` is safe because
        //     1. `&'b T` keeps the lifetime it originated with `'b`
        //        (so as to not have an unbounded lifetime)
        //     2. `&'b T` and `&'b Opaque` have the same memory layout
        //        (when `T` is `Sized`, as it is here)
        // `mem::transmute(f)` is safe since `fn(&T, &mut Formatter<'_>) -> Result`
        // and `fn(&Opaque, &mut Formatter<'_>) -> Result` have the same ABI
        // (as long as `T` is `Sized`)
        unsafe { ArgumentV1 { formatter: mem::transmute(f), value: mem::transmute(x) } }
    }

    /// Erases a `usize` argument, tagging it with `USIZE_MARKER` so that
    /// `as_usize` can later recover the value (used for runtime width and
    /// precision counts, see `getcount`).
    #[doc(hidden)]
    #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
    pub fn from_usize(x: &usize) -> ArgumentV1<'_> {
        ArgumentV1::new(x, USIZE_MARKER)
    }

    /// Returns the wrapped `usize` if this argument was created by
    /// `from_usize` — detected by comparing the stored function pointer
    /// against `USIZE_MARKER` — and `None` otherwise.
    fn as_usize(&self) -> Option<usize> {
        if self.formatter as usize == USIZE_MARKER as usize {
            // SAFETY: The `formatter` field is only set to USIZE_MARKER if
            // the value is a usize, so this is safe
            Some(unsafe { *(self.value as *const _ as *const usize) })
        } else {
            None
        }
    }
}
+
// flags available in the v1 format of format_args
#[derive(Copy, Clone)]
// NOTE(review): each variant appears to be used as a bit position within
// `Formatter::flags` (a `u32`) — confirm against the flag accessor methods.
enum FlagV1 {
    SignPlus,         // the `+` flag (see `Formatter::sign_plus` usage in `pad_integral`)
    SignMinus,
    Alternate,        // the `#` flag (gates the prefix in `pad_integral`)
    SignAwareZeroPad, // the `0` flag (zero-fill before the digits in `pad_integral`)
    DebugLowerHex,
    DebugUpperHex,
}
+
impl<'a> Arguments<'a> {
    /// When using the format_args!() macro, this function is used to generate the
    /// Arguments structure.
    #[doc(hidden)]
    #[inline]
    #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
    pub fn new_v1(pieces: &'a [&'static str], args: &'a [ArgumentV1<'a>]) -> Arguments<'a> {
        Arguments { pieces, fmt: None, args }
    }

    /// This function is used to specify nonstandard formatting parameters.
    /// The `pieces` array must be at least as long as `fmt` to construct
    /// a valid Arguments structure. Also, any `Count` within `fmt` that is
    /// `Count::Param` has to point to an argument created with
    /// `ArgumentV1::from_usize`. Failing to do so doesn't cause unsafety,
    /// but the invalid count will simply be ignored (see `getcount`).
    #[doc(hidden)]
    #[inline]
    #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
    pub fn new_v1_formatted(
        pieces: &'a [&'static str],
        args: &'a [ArgumentV1<'a>],
        fmt: &'a [rt::v1::Argument],
    ) -> Arguments<'a> {
        Arguments { pieces, fmt: Some(fmt), args }
    }

    /// Estimates the length of the formatted text.
    ///
    /// This is intended to be used for setting initial `String` capacity
    /// when using `format!`. Note: this is neither the lower nor upper bound.
    #[doc(hidden)]
    #[inline]
    #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "none")]
    pub fn estimated_capacity(&self) -> usize {
        // Total length of the literal text between the placeholders.
        let pieces_length: usize = self.pieces.iter().map(|x| x.len()).sum();

        if self.args.is_empty() {
            pieces_length
        } else if self.pieces[0] == "" && pieces_length < 16 {
            // If the format string starts with an argument,
            // don't preallocate anything, unless length
            // of pieces is significant.
            0
        } else {
            // There are some arguments, so any additional push
            // will reallocate the string. To avoid that,
            // we're "pre-doubling" the capacity here.
            pieces_length.checked_mul(2).unwrap_or(0)
        }
    }
}
+
+/// This structure represents a safely precompiled version of a format string
+/// and its arguments. This cannot be generated at runtime because it cannot
+/// safely be done, so no constructors are given and the fields are private
+/// to prevent modification.
+///
+/// The [`format_args!`] macro will safely create an instance of this structure.
+/// The macro validates the format string at compile-time so usage of the
+/// [`write()`] and [`format()`] functions can be safely performed.
+///
+/// You can use the `Arguments<'a>` that [`format_args!`] returns in `Debug`
+/// and `Display` contexts as seen below. The example also shows that `Debug`
+/// and `Display` format to the same thing: the interpolated format string
+/// in `format_args!`.
+///
+/// ```rust
+/// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2));
+/// let display = format!("{}", format_args!("{} foo {:?}", 1, 2));
+/// assert_eq!("1 foo 2", display);
+/// assert_eq!(display, debug);
+/// ```
+///
+/// [`format()`]: ../../std/fmt/fn.format.html
#[stable(feature = "rust1", since = "1.0.0")]
// All three fields are shared slices/references, so `Copy`/`Clone` are
// trivial pointer copies.
#[derive(Copy, Clone)]
pub struct Arguments<'a> {
    // Format string pieces to print.
    pieces: &'a [&'static str],

    // Placeholder specs, or `None` if all specs are default (as in "{}{}").
    fmt: Option<&'a [rt::v1::Argument]>,

    // Dynamic arguments for interpolation, to be interleaved with string
    // pieces. (Every argument is preceded by a string piece.)
    args: &'a [ArgumentV1<'a>],
}
+
+impl<'a> Arguments<'a> {
+ /// Get the formatted string, if it has no arguments to be formatted.
+ ///
+ /// This can be used to avoid allocations in the most trivial case.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(fmt_as_str)]
+ ///
+ /// use core::fmt::Arguments;
+ ///
+ /// fn write_str(_: &str) { /* ... */ }
+ ///
+ /// fn write_fmt(args: &Arguments) {
+ /// if let Some(s) = args.as_str() {
+ /// write_str(s)
+ /// } else {
+ /// write_str(&args.to_string());
+ /// }
+ /// }
+ /// ```
+ ///
+ /// ```rust
+ /// #![feature(fmt_as_str)]
+ ///
+ /// assert_eq!(format_args!("hello").as_str(), Some("hello"));
+ /// assert_eq!(format_args!("").as_str(), Some(""));
+ /// assert_eq!(format_args!("{}", 1).as_str(), None);
+ /// ```
+ #[unstable(feature = "fmt_as_str", issue = "74442")]
+ #[inline]
+ pub fn as_str(&self) -> Option<&'static str> {
+ match (self.pieces, self.args) {
+ ([], []) => Some(""),
+ ([s], []) => Some(s),
+ _ => None,
+ }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Debug for Arguments<'_> {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
        // `Arguments` renders identically under `{:?}` and `{}`: just
        // delegate to the `Display` impl.
        Display::fmt(self, fmt)
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Display for Arguments<'_> {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
        // Write straight into the formatter's underlying buffer; the outer
        // formatter's own fill/width/alignment options are not applied to
        // the interpolated result.
        write(fmt.buf, *self)
    }
}
+
+/// `?` formatting.
+///
+/// `Debug` should format the output in a programmer-facing, debugging context.
+///
+/// Generally speaking, you should just `derive` a `Debug` implementation.
+///
+/// When used with the alternate format specifier `#?`, the output is pretty-printed.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// This trait can be used with `#[derive]` if all fields implement `Debug`. When
+/// `derive`d for structs, it will use the name of the `struct`, then `{`, then a
+/// comma-separated list of each field's name and `Debug` value, then `}`. For
+/// `enum`s, it will use the name of the variant and, if applicable, `(`, then the
+/// `Debug` values of the fields, then `)`.
+///
+/// # Stability
+///
+/// Derived `Debug` formats are not stable, and so may change with future Rust
+/// versions. Additionally, `Debug` implementations of types provided by the
+/// standard library (`libstd`, `libcore`, `liballoc`, etc.) are not stable, and
+/// may also change with future Rust versions.
+///
+/// # Examples
+///
+/// Deriving an implementation:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {:?}", origin), "The origin is: Point { x: 0, y: 0 }");
+/// ```
+///
+/// Manually implementing:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl fmt::Debug for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// f.debug_struct("Point")
+/// .field("x", &self.x)
+/// .field("y", &self.y)
+/// .finish()
+/// }
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {:?}", origin), "The origin is: Point { x: 0, y: 0 }");
+/// ```
+///
+/// There are a number of helper methods on the [`Formatter`] struct to help you with manual
+/// implementations, such as [`debug_struct`].
+///
+/// `Debug` implementations using either `derive` or the debug builder API
+/// on [`Formatter`] support pretty-printing using the alternate flag: `{:#?}`.
+///
+/// [`debug_struct`]: Formatter::debug_struct
+///
+/// Pretty-printing with `#?`:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {:#?}", origin),
+/// "The origin is: Point {
+/// x: 0,
+/// y: 0,
+/// }");
+/// ```
+
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_on_unimplemented(
    on(
        crate_local,
        label = "`{Self}` cannot be formatted using `{{:?}}`",
        note = "add `#[derive(Debug)]` or manually implement `{Debug}`"
    ),
    message = "`{Self}` doesn't implement `{Debug}`",
    label = "`{Self}` cannot be formatted using `{{:?}}` because it doesn't implement `{Debug}`"
)]
#[doc(alias = "{:?}")]
#[rustc_diagnostic_item = "debug_trait"]
pub trait Debug {
    /// Formats the value using the given formatter.
    ///
    /// # Errors
    ///
    /// Implementations should propagate any [`Error`] received from the
    /// underlying writer rather than produce errors of their own.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Position {
    ///     longitude: f32,
    ///     latitude: f32,
    /// }
    ///
    /// impl fmt::Debug for Position {
    ///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         f.debug_tuple("")
    ///             .field(&self.longitude)
    ///             .field(&self.latitude)
    ///             .finish()
    ///     }
    /// }
    ///
    /// let position = Position { longitude: 1.987, latitude: 2.983 };
    /// assert_eq!(format!("{:?}", position), "(1.987, 2.983)");
    ///
    /// assert_eq!(format!("{:#?}", position), "(
    ///     1.987,
    ///     2.983,
    /// )");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
// Separate module to reexport the macro `Debug` from prelude without the trait `Debug`.
pub(crate) mod macros {
    /// Derive macro generating an impl of the trait `Debug`.
    #[rustc_builtin_macro]
    #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
    #[allow_internal_unstable(core_intrinsics)]
    pub macro Debug($item:item) {
        /* compiler built-in */
    }
}
// Re-export the derive macro at this module's root so `fmt::Debug` names both
// the trait and the derive.
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[doc(inline)]
pub use macros::Debug;
+
+/// Format trait for an empty format, `{}`.
+///
+/// `Display` is similar to [`Debug`], but `Display` is for user-facing
+/// output, and so cannot be derived.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Implementing `Display` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl fmt::Display for Point {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// write!(f, "({}, {})", self.x, self.y)
+/// }
+/// }
+///
+/// let origin = Point { x: 0, y: 0 };
+///
+/// assert_eq!(format!("The origin is: {}", origin), "The origin is: (0, 0)");
+/// ```
#[rustc_on_unimplemented(
    on(
        _Self = "std::path::Path",
        label = "`{Self}` cannot be formatted with the default formatter; call `.display()` on it",
        note = "call `.display()` or `.to_string_lossy()` to safely print paths, \
                as they may contain non-Unicode data"
    ),
    message = "`{Self}` doesn't implement `{Display}`",
    label = "`{Self}` cannot be formatted with the default formatter",
    note = "in format strings you may be able to use `{{:?}}` (or {{:#?}} for pretty-print) instead"
)]
#[doc(alias = "{}")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Display {
    /// Formats the value using the given formatter.
    ///
    /// # Errors
    ///
    /// Implementations should propagate any [`Error`] received from the
    /// underlying writer rather than produce errors of their own.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::fmt;
    ///
    /// struct Position {
    ///     longitude: f32,
    ///     latitude: f32,
    /// }
    ///
    /// impl fmt::Display for Position {
    ///     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    ///         write!(f, "({}, {})", self.longitude, self.latitude)
    ///     }
    /// }
    ///
    /// assert_eq!("(1.987, 2.983)",
    ///            format!("{}", Position { longitude: 1.987, latitude: 2.983, }));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `o` formatting.
+///
+/// The `Octal` trait should format its output as a number in base-8.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0o` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '52' in octal
+///
+/// assert_eq!(format!("{:o}", x), "52");
+/// assert_eq!(format!("{:#o}", x), "0o52");
+///
+/// assert_eq!(format!("{:o}", -16), "37777777760");
+/// ```
+///
+/// Implementing `Octal` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Octal for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::Octal::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// assert_eq!(format!("l as octal is: {:o}", l), "l as octal is: 11");
+///
+/// assert_eq!(format!("l as octal is: {:#06o}", l), "l as octal is: 0o0011");
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Octal {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write the base-8 representation of `self` to `f`,
    /// propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `b` formatting.
+///
+/// The `Binary` trait should format its output as a number in binary.
+///
+/// For primitive signed integers ([`i8`] to [`i128`], and [`isize`]),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0b` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with [`i32`]:
+///
+/// ```
+/// let x = 42; // 42 is '101010' in binary
+///
+/// assert_eq!(format!("{:b}", x), "101010");
+/// assert_eq!(format!("{:#b}", x), "0b101010");
+///
+/// assert_eq!(format!("{:b}", -16), "11111111111111111111111111110000");
+/// ```
+///
+/// Implementing `Binary` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Binary for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::Binary::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(107);
+///
+/// assert_eq!(format!("l as binary is: {:b}", l), "l as binary is: 1101011");
+///
+/// assert_eq!(
+/// format!("l as binary is: {:#032b}", l),
+/// "l as binary is: 0b000000000000000000000001101011"
+/// );
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Binary {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write the base-2 representation of `self` to `f`,
    /// propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `x` formatting.
+///
+/// The `LowerHex` trait should format its output as a number in hexadecimal, with `a` through `f`
+/// in lower case.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2a' in hex
+///
+/// assert_eq!(format!("{:x}", x), "2a");
+/// assert_eq!(format!("{:#x}", x), "0x2a");
+///
+/// assert_eq!(format!("{:x}", -16), "fffffff0");
+/// ```
+///
+/// Implementing `LowerHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::LowerHex::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(9);
+///
+/// assert_eq!(format!("l as hex is: {:x}", l), "l as hex is: 9");
+///
+/// assert_eq!(format!("l as hex is: {:#010x}", l), "l as hex is: 0x00000009");
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait LowerHex {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write the base-16 representation of `self` to `f`
    /// using lowercase digits, propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `X` formatting.
+///
+/// The `UpperHex` trait should format its output as a number in hexadecimal, with `A` through `F`
+/// in upper case.
+///
+/// For primitive signed integers (`i8` to `i128`, and `isize`),
+/// negative values are formatted as the two’s complement representation.
+///
+/// The alternate flag, `#`, adds a `0x` in front of the output.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `i32`:
+///
+/// ```
+/// let x = 42; // 42 is '2A' in hex
+///
+/// assert_eq!(format!("{:X}", x), "2A");
+/// assert_eq!(format!("{:#X}", x), "0x2A");
+///
+/// assert_eq!(format!("{:X}", -16), "FFFFFFF0");
+/// ```
+///
+/// Implementing `UpperHex` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperHex for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = self.0;
+///
+/// fmt::UpperHex::fmt(&val, f) // delegate to i32's implementation
+/// }
+/// }
+///
+/// let l = Length(i32::MAX);
+///
+/// assert_eq!(format!("l as hex is: {:X}", l), "l as hex is: 7FFFFFFF");
+///
+/// assert_eq!(format!("l as hex is: {:#010X}", l), "l as hex is: 0x7FFFFFFF");
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait UpperHex {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write the base-16 representation of `self` to `f`
    /// using uppercase digits, propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `p` formatting.
+///
+/// The `Pointer` trait should format its output as a memory location. This is commonly presented
+/// as hexadecimal.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `&i32`:
+///
+/// ```
+/// let x = &42;
+///
+/// let address = format!("{:p}", x); // this produces something like '0x7f06092ac6d0'
+/// ```
+///
+/// Implementing `Pointer` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::Pointer for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// // use `as` to convert to a `*const T`, which implements Pointer, which we can use
+///
+/// let ptr = self as *const Self;
+/// fmt::Pointer::fmt(&ptr, f)
+/// }
+/// }
+///
+/// let l = Length(42);
+///
+/// println!("l is in memory here: {:p}", l);
+///
+/// let l_ptr = format!("{:018p}", l);
+/// assert_eq!(l_ptr.len(), 18);
+/// assert_eq!(&l_ptr[..2], "0x");
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "pointer_trait"]
pub trait Pointer {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write the value's memory address (conventionally in
    /// hexadecimal) to `f`, propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_diagnostic_item = "pointer_trait_fmt"]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `e` formatting.
+///
+/// The `LowerExp` trait should format its output in scientific notation with a lower-case `e`.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2e1' in scientific notation
+///
+/// assert_eq!(format!("{:e}", x), "4.2e1");
+/// ```
+///
+/// Implementing `LowerExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::LowerExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = f64::from(self.0);
+/// fmt::LowerExp::fmt(&val, f) // delegate to f64's implementation
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {:e}", l),
+/// "l in scientific notation is: 1e2"
+/// );
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {:05e}", l),
+/// "l in scientific notation is: 001e2"
+/// );
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait LowerExp {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write `self` in scientific notation with a lowercase
    /// `e` to `f`, propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// `E` formatting.
+///
+/// The `UpperExp` trait should format its output in scientific notation with an upper-case `E`.
+///
+/// For more information on formatters, see [the module-level documentation][self].
+///
+/// # Examples
+///
+/// Basic usage with `f64`:
+///
+/// ```
+/// let x = 42.0; // 42.0 is '4.2E1' in scientific notation
+///
+/// assert_eq!(format!("{:E}", x), "4.2E1");
+/// ```
+///
+/// Implementing `UpperExp` on a type:
+///
+/// ```
+/// use std::fmt;
+///
+/// struct Length(i32);
+///
+/// impl fmt::UpperExp for Length {
+/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// let val = f64::from(self.0);
+/// fmt::UpperExp::fmt(&val, f) // delegate to f64's implementation
+/// }
+/// }
+///
+/// let l = Length(100);
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {:E}", l),
+/// "l in scientific notation is: 1E2"
+/// );
+///
+/// assert_eq!(
+/// format!("l in scientific notation is: {:05E}", l),
+/// "l in scientific notation is: 001E2"
+/// );
+/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait UpperExp {
    /// Formats the value using the given formatter.
    ///
    /// Implementations write `self` in scientific notation with an uppercase
    /// `E` to `f`, propagating any error from the writer.
    #[stable(feature = "rust1", since = "1.0.0")]
    fn fmt(&self, f: &mut Formatter<'_>) -> Result;
}
+
+/// The `write` function takes an output stream, and an `Arguments` struct
+/// that can be precompiled with the `format_args!` macro.
+///
+/// The arguments will be formatted according to the specified format string
+/// into the output stream provided.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let mut output = String::new();
+/// fmt::write(&mut output, format_args!("Hello {}!", "world"))
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// Please note that using [`write!`] might be preferable. Example:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut output = String::new();
+/// write!(&mut output, "Hello {}!", "world")
+/// .expect("Error occurred while trying to write in String");
+/// assert_eq!(output, "Hello world!");
+/// ```
+///
+/// [`write!`]: crate::write!
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write(output: &mut dyn Write, args: Arguments<'_>) -> Result {
    // Start from a formatter with every option at its default; per-placeholder
    // specs (if any) are loaded into it by `run` below.
    let mut formatter = Formatter {
        flags: 0,
        width: None,
        precision: None,
        buf: output,
        align: rt::v1::Alignment::Unknown,
        fill: ' ',
    };

    // Number of string pieces consumed by the loops below; used afterwards to
    // locate the optional trailing literal piece.
    let mut idx = 0;

    match args.fmt {
        None => {
            // We can use default formatting parameters for all arguments.
            // `zip` stops at the shorter side, leaving any extra piece for
            // the trailing write below.
            for (arg, piece) in args.args.iter().zip(args.pieces.iter()) {
                formatter.buf.write_str(*piece)?;
                (arg.formatter)(arg.value, &mut formatter)?;
                idx += 1;
            }
        }
        Some(fmt) => {
            // Every spec has a corresponding argument that is preceded by
            // a string piece.
            for (arg, piece) in fmt.iter().zip(args.pieces.iter()) {
                formatter.buf.write_str(*piece)?;
                run(&mut formatter, arg, &args.args)?;
                idx += 1;
            }
        }
    }

    // There can be only one trailing string piece left.
    if let Some(piece) = args.pieces.get(idx) {
        formatter.buf.write_str(*piece)?;
    }

    Ok(())
}
+
// Applies a single placeholder spec: loads the spec's fill/alignment/flags and
// its resolved width/precision into `fmt`, then formats the argument the spec
// points at. Indexing panics if `arg.position` is out of bounds of `args`.
fn run(fmt: &mut Formatter<'_>, arg: &rt::v1::Argument, args: &[ArgumentV1<'_>]) -> Result {
    fmt.fill = arg.format.fill;
    fmt.align = arg.format.align;
    fmt.flags = arg.format.flags;
    fmt.width = getcount(args, &arg.format.width);
    fmt.precision = getcount(args, &arg.format.precision);

    // Extract the correct argument
    let value = args[arg.position];

    // Then actually do some printing
    (value.formatter)(value.value, fmt)
}
+
+fn getcount(args: &[ArgumentV1<'_>], cnt: &rt::v1::Count) -> Option<usize> {
+ match *cnt {
+ rt::v1::Count::Is(n) => Some(n),
+ rt::v1::Count::Implied => None,
+ rt::v1::Count::Param(i) => args[i].as_usize(),
+ }
+}
+
/// Padding after the end of something. Returned by `Formatter::padding`.
#[must_use = "don't forget to write the post padding"]
struct PostPadding {
    fill: char,     // character repeated to form the padding
    padding: usize, // number of `fill` characters still to be written
}
+
+impl PostPadding {
+ fn new(fill: char, padding: usize) -> PostPadding {
+ PostPadding { fill, padding }
+ }
+
+ /// Write this post padding.
+ fn write(self, buf: &mut dyn Write) -> Result {
+ for _ in 0..self.padding {
+ buf.write_char(self.fill)?;
+ }
+ Ok(())
+ }
+}
+
+impl<'a> Formatter<'a> {
    /// Creates a new `Formatter` that writes through `wrap(self.buf)` while
    /// carrying over all of this formatter's option state (flags, fill,
    /// alignment, width, precision). The `'b: 'c` bound lets the wrapped
    /// formatter borrow from `self` for a shorter lifetime.
    fn wrap_buf<'b, 'c, F>(&'b mut self, wrap: F) -> Formatter<'c>
    where
        'b: 'c,
        F: FnOnce(&'b mut (dyn Write + 'b)) -> &'c mut (dyn Write + 'c),
    {
        Formatter {
            // We want to change this
            buf: wrap(self.buf),

            // And preserve these
            flags: self.flags,
            fill: self.fill,
            align: self.align,
            width: self.width,
            precision: self.precision,
        }
    }
+
+ // Helper methods used for padding and processing formatting arguments that
+ // all formatting traits can use.
+
+ /// Performs the correct padding for an integer which has already been
+ /// emitted into a str. The str should *not* contain the sign for the
+ /// integer, that will be added by this method.
+ ///
+ /// # Arguments
+ ///
+ /// * is_nonnegative - whether the original integer was either positive or zero.
+ /// * prefix - if the '#' character (Alternate) is provided, this
+ /// is the prefix to put in front of the number.
+ /// * buf - the byte array that the number has been formatted into
+ ///
+ /// This function will correctly account for the flags provided as well as
+ /// the minimum width. It will not take precision into account.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
    /// struct Foo { nb: i32 }
+ ///
+ /// impl Foo {
+ /// fn new(nb: i32) -> Foo {
+ /// Foo {
+ /// nb,
+ /// }
+ /// }
+ /// }
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// // We need to remove "-" from the number output.
+ /// let tmp = self.nb.abs().to_string();
+ ///
    ///         formatter.pad_integral(self.nb >= 0, "Foo ", &tmp)
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{}", Foo::new(2)), "2");
+ /// assert_eq!(&format!("{}", Foo::new(-1)), "-1");
+ /// assert_eq!(&format!("{:#}", Foo::new(-1)), "-Foo 1");
+ /// assert_eq!(&format!("{:0>#8}", Foo::new(-1)), "00-Foo 1");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pad_integral(&mut self, is_nonnegative: bool, prefix: &str, buf: &str) -> Result {
+ let mut width = buf.len();
+
+ let mut sign = None;
+ if !is_nonnegative {
+ sign = Some('-');
+ width += 1;
+ } else if self.sign_plus() {
+ sign = Some('+');
+ width += 1;
+ }
+
+ let prefix = if self.alternate() {
+ width += prefix.chars().count();
+ Some(prefix)
+ } else {
+ None
+ };
+
+ // Writes the sign if it exists, and then the prefix if it was requested
+ #[inline(never)]
+ fn write_prefix(f: &mut Formatter<'_>, sign: Option<char>, prefix: Option<&str>) -> Result {
+ if let Some(c) = sign {
+ f.buf.write_char(c)?;
+ }
+ if let Some(prefix) = prefix { f.buf.write_str(prefix) } else { Ok(()) }
+ }
+
+ // The `width` field is more of a `min-width` parameter at this point.
+ match self.width {
+ // If there's no minimum length requirements then we can just
+ // write the bytes.
+ None => {
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)
+ }
+ // Check if we're over the minimum width, if so then we can also
+ // just write the bytes.
+ Some(min) if width >= min => {
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)
+ }
+ // The sign and prefix goes before the padding if the fill character
+ // is zero
+ Some(min) if self.sign_aware_zero_pad() => {
+ let old_fill = crate::mem::replace(&mut self.fill, '0');
+ let old_align = crate::mem::replace(&mut self.align, rt::v1::Alignment::Right);
+ write_prefix(self, sign, prefix)?;
+ let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?;
+ self.buf.write_str(buf)?;
+ post_padding.write(self.buf)?;
+ self.fill = old_fill;
+ self.align = old_align;
+ Ok(())
+ }
+ // Otherwise, the sign and prefix goes after the padding
+ Some(min) => {
+ let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?;
+ write_prefix(self, sign, prefix)?;
+ self.buf.write_str(buf)?;
+ post_padding.write(self.buf)
+ }
+ }
+ }
+
+ /// This function takes a string slice and emits it to the internal buffer
+ /// after applying the relevant formatting flags specified. The flags
+ /// recognized for generic strings are:
+ ///
+ /// * width - the minimum width of what to emit
+ /// * fill/align - what to emit and where to emit it if the string
+ /// provided needs to be padded
+ /// * precision - the maximum length to emit, the string is truncated if it
+ /// is longer than this length
+ ///
+ /// Notably this function ignores the `flag` parameters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.pad("Foo")
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:<4}", Foo), "Foo ");
+ /// assert_eq!(&format!("{:0>4}", Foo), "0Foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pad(&mut self, s: &str) -> Result {
+ // Make sure there's a fast path up front
+ if self.width.is_none() && self.precision.is_none() {
+ return self.buf.write_str(s);
+ }
+ // The `precision` field can be interpreted as a `max-width` for the
+ // string being formatted.
+ let s = if let Some(max) = self.precision {
+ // If our string is longer than the precision, then we must have
+ // truncation. However other flags like `fill`, `width` and `align`
+ // must act as always.
+ if let Some((i, _)) = s.char_indices().nth(max) {
+ // LLVM here can't prove that `..i` won't panic `&s[..i]`, but
+ // we know that it can't panic. Use `get` + `unwrap_or` to avoid
+ // `unsafe` and otherwise don't emit any panic-related code
+ // here.
+ s.get(..i).unwrap_or(&s)
+ } else {
+ &s
+ }
+ } else {
+ &s
+ };
+ // The `width` field is more of a `min-width` parameter at this point.
+ match self.width {
+ // If we're under the maximum length, and there's no minimum length
+ // requirements, then we can just emit the string
+ None => self.buf.write_str(s),
+ // If we're under the maximum width, check if we're over the minimum
+ // width, if so it's as easy as just emitting the string.
+ Some(width) if s.chars().count() >= width => self.buf.write_str(s),
+ // If we're under both the maximum and the minimum width, then fill
+ // up the minimum width with the specified string + some alignment.
+ Some(width) => {
+ let align = rt::v1::Alignment::Left;
+ let post_padding = self.padding(width - s.chars().count(), align)?;
+ self.buf.write_str(s)?;
+ post_padding.write(self.buf)
+ }
+ }
+ }
+
+ /// Write the pre-padding and return the unwritten post-padding. Callers are
+ /// responsible for ensuring post-padding is written after the thing that is
+ /// being padded.
+ fn padding(
+ &mut self,
+ padding: usize,
+ default: rt::v1::Alignment,
+ ) -> result::Result<PostPadding, Error> {
+ let align = match self.align {
+ rt::v1::Alignment::Unknown => default,
+ _ => self.align,
+ };
+
+ let (pre_pad, post_pad) = match align {
+ rt::v1::Alignment::Left => (0, padding),
+ rt::v1::Alignment::Right | rt::v1::Alignment::Unknown => (padding, 0),
+ rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2),
+ };
+
+ for _ in 0..pre_pad {
+ self.buf.write_char(self.fill)?;
+ }
+
+ Ok(PostPadding::new(self.fill, post_pad))
+ }
+
+ /// Takes the formatted parts and applies the padding.
+ /// Assumes that the caller already has rendered the parts with required precision,
+ /// so that `self.precision` can be ignored.
+ fn pad_formatted_parts(&mut self, formatted: &flt2dec::Formatted<'_>) -> Result {
+ if let Some(mut width) = self.width {
+ // for the sign-aware zero padding, we render the sign first and
+ // behave as if we had no sign from the beginning.
+ let mut formatted = formatted.clone();
+ let old_fill = self.fill;
+ let old_align = self.align;
+ let mut align = old_align;
+ if self.sign_aware_zero_pad() {
+ // a sign always goes first
+ let sign = formatted.sign;
+ self.buf.write_str(sign)?;
+
+ // remove the sign from the formatted parts
+ formatted.sign = "";
+ width = width.saturating_sub(sign.len());
+ align = rt::v1::Alignment::Right;
+ self.fill = '0';
+ self.align = rt::v1::Alignment::Right;
+ }
+
+ // remaining parts go through the ordinary padding process.
+ let len = formatted.len();
+ let ret = if width <= len {
+ // no padding
+ self.write_formatted_parts(&formatted)
+ } else {
+ let post_padding = self.padding(width - len, align)?;
+ self.write_formatted_parts(&formatted)?;
+ post_padding.write(self.buf)
+ };
+ self.fill = old_fill;
+ self.align = old_align;
+ ret
+ } else {
+ // this is the common case and we take a shortcut
+ self.write_formatted_parts(formatted)
+ }
+ }
+
+ fn write_formatted_parts(&mut self, formatted: &flt2dec::Formatted<'_>) -> Result {
+ fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result {
+ // SAFETY: This is used for `flt2dec::Part::Num` and `flt2dec::Part::Copy`.
+ // It's safe to use for `flt2dec::Part::Num` since every char `c` is between
+ // `b'0'` and `b'9'`, which means `s` is valid UTF-8.
+ // It's also probably safe in practice to use for `flt2dec::Part::Copy(buf)`
+ // since `buf` should be plain ASCII, but it's possible for someone to pass
+ // in a bad value for `buf` into `flt2dec::to_shortest_str` since it is a
+ // public function.
+ // FIXME: Determine whether this could result in UB.
+ buf.write_str(unsafe { str::from_utf8_unchecked(s) })
+ }
+
+ if !formatted.sign.is_empty() {
+ self.buf.write_str(formatted.sign)?;
+ }
+ for part in formatted.parts {
+ match *part {
+ flt2dec::Part::Zero(mut nzeroes) => {
+ const ZEROES: &str = // 64 zeroes
+ "0000000000000000000000000000000000000000000000000000000000000000";
+ while nzeroes > ZEROES.len() {
+ self.buf.write_str(ZEROES)?;
+ nzeroes -= ZEROES.len();
+ }
+ if nzeroes > 0 {
+ self.buf.write_str(&ZEROES[..nzeroes])?;
+ }
+ }
+ flt2dec::Part::Num(mut v) => {
+ let mut s = [0; 5];
+ let len = part.len();
+ for c in s[..len].iter_mut().rev() {
+ *c = b'0' + (v % 10) as u8;
+ v /= 10;
+ }
+ write_bytes(self.buf, &s[..len])?;
+ }
+ flt2dec::Part::Copy(buf) => {
+ write_bytes(self.buf, buf)?;
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Writes some data to the underlying buffer contained within this
+ /// formatter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.write_str("Foo")
+ /// // This is equivalent to:
+ /// // write!(formatter, "Foo")
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{}", Foo), "Foo");
+ /// assert_eq!(&format!("{:0>8}", Foo), "Foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write_str(&mut self, data: &str) -> Result {
+ self.buf.write_str(data)
+ }
+
+ /// Writes some formatted information into this instance.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// formatter.write_fmt(format_args!("Foo {}", self.0))
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{}", Foo(-1)), "Foo -1");
+ /// assert_eq!(&format!("{:0>8}", Foo(2)), "Foo 2");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result {
+ write(self.buf, fmt)
+ }
+
+ /// Flags for formatting
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(
+ since = "1.24.0",
+ reason = "use the `sign_plus`, `sign_minus`, `alternate`, \
+ or `sign_aware_zero_pad` methods instead"
+ )]
+ pub fn flags(&self) -> u32 {
+ self.flags
+ }
+
+ /// Character used as 'fill' whenever there is alignment.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// let c = formatter.fill();
+ /// if let Some(width) = formatter.width() {
+ /// for _ in 0..width {
+ /// write!(formatter, "{}", c)?;
+ /// }
+ /// Ok(())
+ /// } else {
+ /// write!(formatter, "{}", c)
+ /// }
+ /// }
+ /// }
+ ///
+ /// // We set alignment to the left with ">".
+ /// assert_eq!(&format!("{:G>3}", Foo), "GGG");
+ /// assert_eq!(&format!("{:t>6}", Foo), "tttttt");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn fill(&self) -> char {
+ self.fill
+ }
+
+ /// Flag indicating what form of alignment was requested.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// extern crate core;
+ ///
+ /// use std::fmt::{self, Alignment};
+ ///
+ /// struct Foo;
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// let s = if let Some(s) = formatter.align() {
+ /// match s {
+ /// Alignment::Left => "left",
+ /// Alignment::Right => "right",
+ /// Alignment::Center => "center",
+ /// }
+ /// } else {
+ /// "into the void"
+ /// };
+ /// write!(formatter, "{}", s)
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:<}", Foo), "left");
+ /// assert_eq!(&format!("{:>}", Foo), "right");
+ /// assert_eq!(&format!("{:^}", Foo), "center");
+ /// assert_eq!(&format!("{}", Foo), "into the void");
+ /// ```
+ #[stable(feature = "fmt_flags_align", since = "1.28.0")]
+ pub fn align(&self) -> Option<Alignment> {
+ match self.align {
+ rt::v1::Alignment::Left => Some(Alignment::Left),
+ rt::v1::Alignment::Right => Some(Alignment::Right),
+ rt::v1::Alignment::Center => Some(Alignment::Center),
+ rt::v1::Alignment::Unknown => None,
+ }
+ }
+
+ /// Optionally specified integer width that the output should be.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if let Some(width) = formatter.width() {
+ /// // If we received a width, we use it
+ /// write!(formatter, "{:width$}", &format!("Foo({})", self.0), width = width)
+ /// } else {
+ /// // Otherwise we do nothing special
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:10}", Foo(23)), "Foo(23) ");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn width(&self) -> Option<usize> {
+ self.width
+ }
+
+ /// Optionally specified precision for numeric types. Alternatively, the
+ /// maximum width for string types.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(f32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if let Some(precision) = formatter.precision() {
+ /// // If we received a precision, we use it.
+ /// write!(formatter, "Foo({1:.*})", precision, self.0)
+ /// } else {
+ /// // Otherwise we default to 2.
+ /// write!(formatter, "Foo({:.2})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:.4}", Foo(23.2)), "Foo(23.2000)");
+ /// assert_eq!(&format!("{}", Foo(23.2)), "Foo(23.20)");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn precision(&self) -> Option<usize> {
+ self.precision
+ }
+
+ /// Determines if the `+` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.sign_plus() {
+ /// write!(formatter,
+ /// "Foo({}{})",
+ /// if self.0 < 0 { '-' } else { '+' },
+ /// self.0.abs())
+ /// } else {
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_plus(&self) -> bool {
+ self.flags & (1 << FlagV1::SignPlus as u32) != 0
+ }
+
+ /// Determines if the `-` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.sign_minus() {
+ /// // You want a minus sign? Have one!
+ /// write!(formatter, "-Foo({})", self.0)
+ /// } else {
+ /// write!(formatter, "Foo({})", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:-}", Foo(23)), "-Foo(23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_minus(&self) -> bool {
+ self.flags & (1 << FlagV1::SignMinus as u32) != 0
+ }
+
+ /// Determines if the `#` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// if formatter.alternate() {
+ /// write!(formatter, "Foo({})", self.0)
+ /// } else {
+ /// write!(formatter, "{}", self.0)
+ /// }
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:#}", Foo(23)), "Foo(23)");
+ /// assert_eq!(&format!("{}", Foo(23)), "23");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn alternate(&self) -> bool {
+ self.flags & (1 << FlagV1::Alternate as u32) != 0
+ }
+
+ /// Determines if the `0` flag was specified.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// struct Foo(i32);
+ ///
+ /// impl fmt::Display for Foo {
+ /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ /// assert!(formatter.sign_aware_zero_pad());
+ /// assert_eq!(formatter.width(), Some(4));
+ /// // We ignore the formatter's options.
+ /// write!(formatter, "{}", self.0)
+ /// }
+ /// }
+ ///
+ /// assert_eq!(&format!("{:04}", Foo(23)), "23");
+ /// ```
+ #[stable(feature = "fmt_flags", since = "1.5.0")]
+ pub fn sign_aware_zero_pad(&self) -> bool {
+ self.flags & (1 << FlagV1::SignAwareZeroPad as u32) != 0
+ }
+
+ // FIXME: Decide what public API we want for these two flags.
+ // https://github.com/rust-lang/rust/issues/48584
+ fn debug_lower_hex(&self) -> bool {
+ self.flags & (1 << FlagV1::DebugLowerHex as u32) != 0
+ }
+
+ fn debug_upper_hex(&self) -> bool {
+ self.flags & (1 << FlagV1::DebugUpperHex as u32) != 0
+ }
+
+ /// Creates a [`DebugStruct`] builder designed to assist with creation of
+ /// [`fmt::Debug`] implementations for structs.
+ ///
+ /// [`fmt::Debug`]: self::Debug
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ /// use std::net::Ipv4Addr;
+ ///
+ /// struct Foo {
+ /// bar: i32,
+ /// baz: String,
+ /// addr: Ipv4Addr,
+ /// }
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_struct("Foo")
+ /// .field("bar", &self.bar)
+ /// .field("baz", &self.baz)
+ /// .field("addr", &format_args!("{}", self.addr))
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// "Foo { bar: 10, baz: \"Hello World\", addr: 127.0.0.1 }",
+ /// format!("{:?}", Foo {
+ /// bar: 10,
+ /// baz: "Hello World".to_string(),
+ /// addr: Ipv4Addr::new(127, 0, 0, 1),
+ /// })
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_struct<'b>(&'b mut self, name: &str) -> DebugStruct<'b, 'a> {
+ builders::debug_struct_new(self, name)
+ }
+
+ /// Creates a `DebugTuple` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for tuple structs.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ /// use std::marker::PhantomData;
+ ///
+ /// struct Foo<T>(i32, String, PhantomData<T>);
+ ///
+ /// impl<T> fmt::Debug for Foo<T> {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_tuple("Foo")
+ /// .field(&self.0)
+ /// .field(&self.1)
+ /// .field(&format_args!("_"))
+ /// .finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// "Foo(10, \"Hello\", _)",
+ /// format!("{:?}", Foo(10, "Hello".to_string(), PhantomData::<u8>))
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_tuple<'b>(&'b mut self, name: &str) -> DebugTuple<'b, 'a> {
+ builders::debug_tuple_new(self, name)
+ }
+
+ /// Creates a `DebugList` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for list-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_list().entries(self.0.iter()).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(format!("{:?}", Foo(vec![10, 11])), "[10, 11]");
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_list<'b>(&'b mut self) -> DebugList<'b, 'a> {
+ builders::debug_list_new(self)
+ }
+
+ /// Creates a `DebugSet` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for set-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<i32>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_set().entries(self.0.iter()).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(format!("{:?}", Foo(vec![10, 11])), "{10, 11}");
+ /// ```
+ ///
+ /// [`format_args!`]: crate::format_args
+ ///
+ /// In this more complex example, we use [`format_args!`] and `.debug_set()`
+ /// to build a list of match arms:
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Arm<'a, L: 'a, R: 'a>(&'a (L, R));
+ /// struct Table<'a, K: 'a, V: 'a>(&'a [(K, V)], V);
+ ///
+ /// impl<'a, L, R> fmt::Debug for Arm<'a, L, R>
+ /// where
+ /// L: 'a + fmt::Debug, R: 'a + fmt::Debug
+ /// {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// L::fmt(&(self.0).0, fmt)?;
+ /// fmt.write_str(" => ")?;
+ /// R::fmt(&(self.0).1, fmt)
+ /// }
+ /// }
+ ///
+ /// impl<'a, K, V> fmt::Debug for Table<'a, K, V>
+ /// where
+ /// K: 'a + fmt::Debug, V: 'a + fmt::Debug
+ /// {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_set()
+ /// .entries(self.0.iter().map(Arm))
+ /// .entry(&Arm(&(format_args!("_"), &self.1)))
+ /// .finish()
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_set<'b>(&'b mut self) -> DebugSet<'b, 'a> {
+ builders::debug_set_new(self)
+ }
+
+ /// Creates a `DebugMap` builder designed to assist with creation of
+ /// `fmt::Debug` implementations for map-like structures.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::fmt;
+ ///
+ /// struct Foo(Vec<(String, i32)>);
+ ///
+ /// impl fmt::Debug for Foo {
+ /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ /// fmt.debug_map().entries(self.0.iter().map(|&(ref k, ref v)| (k, v))).finish()
+ /// }
+ /// }
+ ///
+ /// assert_eq!(
+ /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])),
+ /// r#"{"A": 10, "B": 11}"#
+ /// );
+ /// ```
+ #[stable(feature = "debug_builders", since = "1.2.0")]
+ pub fn debug_map<'b>(&'b mut self) -> DebugMap<'b, 'a> {
+ builders::debug_map_new(self)
+ }
+}
+
+#[stable(since = "1.2.0", feature = "formatter_write")]
+impl Write for Formatter<'_> {
+ fn write_str(&mut self, s: &str) -> Result {
+ self.buf.write_str(s)
+ }
+
+ fn write_char(&mut self, c: char) -> Result {
+ self.buf.write_char(c)
+ }
+
+ fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
+ write(self.buf, args)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for Error {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt("an error occurred when formatting an argument", f)
+ }
+}
+
+// Implementations of the core formatting traits
+
+macro_rules! fmt_refs {
+ ($($tr:ident),*) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + $tr> $tr for &T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
+ }
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<T: ?Sized + $tr> $tr for &mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
+ }
+ )*
+ }
+}
+
+fmt_refs! { Debug, Display, Octal, Binary, LowerHex, UpperHex, LowerExp, UpperExp }
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Debug for ! {
+ fn fmt(&self, _: &mut Formatter<'_>) -> Result {
+ *self
+ }
+}
+
+#[unstable(feature = "never_type", issue = "35121")]
+impl Display for ! {
+ fn fmt(&self, _: &mut Formatter<'_>) -> Result {
+ *self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for bool {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt(self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for bool {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Display::fmt(if *self { "true" } else { "false" }, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for str {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_char('"')?;
+ let mut from = 0;
+ for (i, c) in self.char_indices() {
+ let esc = c.escape_debug();
+ // If char needs escaping, flush backlog so far and write, else skip
+ if esc.len() != 1 {
+ f.write_str(&self[from..i])?;
+ for c in esc {
+ f.write_char(c)?;
+ }
+ from = i + c.len_utf8();
+ }
+ }
+ f.write_str(&self[from..])?;
+ f.write_char('"')
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for str {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for char {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_char('\'')?;
+ for c in self.escape_debug() {
+ f.write_char(c)?
+ }
+ f.write_char('\'')
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Display for char {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ if f.width.is_none() && f.precision.is_none() {
+ f.write_char(*self)
+ } else {
+ f.pad(self.encode_utf8(&mut [0; 4]))
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for *const T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ let old_width = f.width;
+ let old_flags = f.flags;
+
+ // The alternate flag is already treated by LowerHex as being special;
+ // it denotes whether to prefix with 0x. We use it to work out whether
+ // or not to zero extend, and then unconditionally set it to get the
+ // prefix.
+ if f.alternate() {
+ f.flags |= 1 << (FlagV1::SignAwareZeroPad as u32);
+
+ if f.width.is_none() {
+ f.width = Some((usize::BITS / 4) as usize + 2);
+ }
+ }
+ f.flags |= 1 << (FlagV1::Alternate as u32);
+
+ let ret = LowerHex::fmt(&(*self as *const () as usize), f);
+
+ f.width = old_width;
+ f.flags = old_flags;
+
+ ret
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for *mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(*self as *const T), f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for &T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(*self as *const T), f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Pointer for &mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+// Implementation of Display/Debug for various core types
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for *const T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(self, f)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for *mut T {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Pointer::fmt(self, f)
+ }
+}
+
+macro_rules! peel {
+ ($name:ident, $($other:ident,)*) => (tuple! { $($other,)* })
+}
+
+macro_rules! tuple {
+ () => ();
+ ( $($name:ident,)+ ) => (
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($name:Debug),+> Debug for ($($name,)+) where last_type!($($name,)+): ?Sized {
+ #[allow(non_snake_case, unused_assignments)]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ let mut builder = f.debug_tuple("");
+ let ($(ref $name,)+) = *self;
+ $(
+ builder.field(&$name);
+ )+
+
+ builder.finish()
+ }
+ }
+ peel! { $($name,)+ }
+ )
+}
+
+macro_rules! last_type {
+ ($a:ident,) => { $a };
+ ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+}
+
+tuple! { T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Debug> Debug for [T] {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Debug for () {
+ #[inline]
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad("()")
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Debug for PhantomData<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad("PhantomData")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Copy + Debug> Debug for Cell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.debug_struct("Cell").field("value", &self.get()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for RefCell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ match self.try_borrow() {
+ Ok(borrow) => f.debug_struct("RefCell").field("value", &borrow).finish(),
+ Err(_) => {
+ // The RefCell is mutably borrowed so we can't look at its value
+ // here. Show a placeholder instead.
+ struct BorrowedPlaceholder;
+
+ impl Debug for BorrowedPlaceholder {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.write_str("<borrowed>")
+ }
+ }
+
+ f.debug_struct("RefCell").field("value", &BorrowedPlaceholder).finish()
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for Ref<'_, T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Debug> Debug for RefMut<'_, T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ Debug::fmt(&*(self.deref()), f)
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: ?Sized + Debug> Debug for UnsafeCell<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+ f.pad("UnsafeCell")
+ }
+}
+
+// If you expected tests to be here, look instead at the core/tests/fmt.rs file,
+// it's a lot easier than creating all of the rt::Piece structures here.
+// There are also tests in the alloc crate, for those that need allocations.
--- /dev/null
+//! Integer and floating-point number formatting
+
+use crate::fmt;
+use crate::mem::MaybeUninit;
+use crate::num::flt2dec;
+use crate::ops::{Div, Rem, Sub};
+use crate::ptr;
+use crate::slice;
+use crate::str;
+
+#[doc(hidden)]
+trait DisplayInt:
+ PartialEq + PartialOrd + Div<Output = Self> + Rem<Output = Self> + Sub<Output = Self> + Copy
+{
+ fn zero() -> Self;
+ fn from_u8(u: u8) -> Self;
+ fn to_u8(&self) -> u8;
+ fn to_u16(&self) -> u16;
+ fn to_u32(&self) -> u32;
+ fn to_u64(&self) -> u64;
+ fn to_u128(&self) -> u128;
+}
+
+macro_rules! impl_int {
+ ($($t:ident)*) => (
+ $(impl DisplayInt for $t {
+ fn zero() -> Self { 0 }
+ fn from_u8(u: u8) -> Self { u as Self }
+ fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
+ fn to_u32(&self) -> u32 { *self as u32 }
+ fn to_u64(&self) -> u64 { *self as u64 }
+ fn to_u128(&self) -> u128 { *self as u128 }
+ })*
+ )
+}
+macro_rules! impl_uint {
+ ($($t:ident)*) => (
+ $(impl DisplayInt for $t {
+ fn zero() -> Self { 0 }
+ fn from_u8(u: u8) -> Self { u as Self }
+ fn to_u8(&self) -> u8 { *self as u8 }
+ fn to_u16(&self) -> u16 { *self as u16 }
+ fn to_u32(&self) -> u32 { *self as u32 }
+ fn to_u64(&self) -> u64 { *self as u64 }
+ fn to_u128(&self) -> u128 { *self as u128 }
+ })*
+ )
+}
+
+impl_int! { i8 i16 i32 i64 i128 isize }
+impl_uint! { u8 u16 u32 u64 u128 usize }
+
+/// A type that represents a specific radix
+#[doc(hidden)]
+trait GenericRadix: Sized {
+ /// The number of digits.
+ const BASE: u8;
+
+ /// A radix-specific prefix string.
+ const PREFIX: &'static str;
+
+ /// Converts an integer to corresponding radix digit.
+ fn digit(x: u8) -> u8;
+
+ /// Format an integer using the radix using a formatter.
+ fn fmt_int<T: DisplayInt>(&self, mut x: T, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // The radix can be as low as 2, so we need a buffer of at least 128
+ // characters for a base 2 number.
+ let zero = T::zero();
+ let is_nonnegative = x >= zero;
+ let mut buf = [MaybeUninit::<u8>::uninit(); 128];
+ let mut curr = buf.len();
+ let base = T::from_u8(Self::BASE);
+ if is_nonnegative {
+ // Accumulate each digit of the number from the least significant
+ // to the most significant figure.
+ for byte in buf.iter_mut().rev() {
+ let n = x % base; // Get the current place value.
+ x = x / base; // Deaccumulate the number.
+ byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer.
+ curr -= 1;
+ if x == zero {
+ // No more digits left to accumulate.
+ break;
+ };
+ }
+ } else {
+ // Do the same as above, but accounting for two's complement.
+ for byte in buf.iter_mut().rev() {
+ let n = zero - (x % base); // Get the current place value.
+ x = x / base; // Deaccumulate the number.
+ byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer.
+ curr -= 1;
+ if x == zero {
+ // No more digits left to accumulate.
+ break;
+ };
+ }
+ }
+ let buf = &buf[curr..];
+ // SAFETY: The only chars in `buf` are created by `Self::digit` which are assumed to be
+ // valid UTF-8
+ let buf = unsafe {
+ str::from_utf8_unchecked(slice::from_raw_parts(
+ MaybeUninit::slice_as_ptr(buf),
+ buf.len(),
+ ))
+ };
+ f.pad_integral(is_nonnegative, Self::PREFIX, buf)
+ }
+}
+
// Unit marker types; each receives its `GenericRadix` implementation from
// the `radix!` macro below.

/// A binary (base 2) radix
#[derive(Clone, PartialEq)]
struct Binary;

/// An octal (base 8) radix
#[derive(Clone, PartialEq)]
struct Octal;

/// A hexadecimal (base 16) radix, formatted with lower-case characters
#[derive(Clone, PartialEq)]
struct LowerHex;

/// A hexadecimal (base 16) radix, formatted with upper-case characters
#[derive(Clone, PartialEq)]
struct UpperHex;
+
// Generates a `GenericRadix` impl for one marker type: `$base`/`$prefix`
// fill in the constants and each `$x => $conv` arm maps a digit value to its
// ASCII byte. A digit outside the covered range is a caller bug, hence the
// panicking fallback arm.
macro_rules! radix {
    ($T:ident, $base:expr, $prefix:expr, $($x:pat => $conv:expr),+) => {
        impl GenericRadix for $T {
            const BASE: u8 = $base;
            const PREFIX: &'static str = $prefix;
            fn digit(x: u8) -> u8 {
                match x {
                    $($x => $conv,)+
                    x => panic!("number not in the range 0..={}: {}", Self::BASE - 1, x),
                }
            }
        }
    }
}

radix! { Binary, 2, "0b", x @ 0 ..= 1 => b'0' + x }
radix! { Octal, 8, "0o", x @ 0 ..= 7 => b'0' + x }
radix! { LowerHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'a' + (x - 10) }
radix! { UpperHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, x @ 10 ..= 15 => b'A' + (x - 10) }
+
// Implements a single radix formatting trait (e.g. `fmt::Binary`) for `$T`
// by casting the value to the unsigned type `$U` and delegating to the
// `$Radix` marker's `GenericRadix::fmt_int`.
macro_rules! int_base {
    (fmt::$Trait:ident for $T:ident as $U:ident -> $Radix:ident) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl fmt::$Trait for $T {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                $Radix.fmt_int(*self as $U, f)
            }
        }
    };
}
+
// Wires up all four radix traits for a signed/unsigned type pair. The signed
// type is cast to its unsigned counterpart (`$Int as $Uint`), so negative
// values print their two's-complement bit pattern rather than a sign.
macro_rules! integer {
    ($Int:ident, $Uint:ident) => {
        int_base! { fmt::Binary for $Int as $Uint -> Binary }
        int_base! { fmt::Octal for $Int as $Uint -> Octal }
        int_base! { fmt::LowerHex for $Int as $Uint -> LowerHex }
        int_base! { fmt::UpperHex for $Int as $Uint -> UpperHex }

        int_base! { fmt::Binary for $Uint as $Uint -> Binary }
        int_base! { fmt::Octal for $Uint as $Uint -> Octal }
        int_base! { fmt::LowerHex for $Uint as $Uint -> LowerHex }
        int_base! { fmt::UpperHex for $Uint as $Uint -> UpperHex }
    };
}
integer! { isize, usize }
integer! { i8, u8 }
integer! { i16, u16 }
integer! { i32, u32 }
integer! { i64, u64 }
integer! { i128, u128 }
// Implements `fmt::Debug` for the primitive integers by forwarding to
// `Display`, except when the formatter carries a hex-debug flag
// (`debug_lower_hex`/`debug_upper_hex`), in which case the corresponding
// hexadecimal trait is used instead.
macro_rules! debug {
    ($($T:ident)*) => {$(
        #[stable(feature = "rust1", since = "1.0.0")]
        impl fmt::Debug for $T {
            #[inline]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                if f.debug_lower_hex() {
                    fmt::LowerHex::fmt(self, f)
                } else if f.debug_upper_hex() {
                    fmt::UpperHex::fmt(self, f)
                } else {
                    fmt::Display::fmt(self, f)
                }
            }
        }
    )*};
}
debug! {
    i8 i16 i32 i64 i128 isize
    u8 u16 u32 u64 u128 usize
}
+
// 2 digit decimal look up table: the ASCII pairs "00", "01", ..., "99" laid
// out back to back, so the two digits of any `n < 100` live at
// `DEC_DIGITS_LUT[2 * n .. 2 * n + 2]` (hence the `<< 1` indexing below).
static DEC_DIGITS_LUT: &[u8; 200] = b"0001020304050607080910111213141516171819\
    2021222324252627282930313233343536373839\
    4041424344454647484950515253545556575859\
    6061626364656667686970717273747576777879\
    8081828384858687888990919293949596979899";
+
macro_rules! impl_Display {
    ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => {
        // Shared decimal formatting routine. `n` is the magnitude (callers
        // below have already made it non-negative); `is_nonnegative` carries
        // the original sign for `pad_integral`.
        fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // 2^128 is about 3*10^38, so 39 gives an extra byte of space
            let mut buf = [MaybeUninit::<u8>::uninit(); 39];
            let mut curr = buf.len() as isize;
            let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
            let lut_ptr = DEC_DIGITS_LUT.as_ptr();

            // SAFETY: Since `d1` and `d2` are always less than or equal to `198`, we
            // can copy from `lut_ptr[d1..d1 + 1]` and `lut_ptr[d2..d2 + 1]`. To show
            // that it's OK to copy into `buf_ptr`, notice that at the beginning
            // `curr == buf.len() == 39 > log(n)` since `n < 2^128 < 10^39`, and at
            // each step this is kept the same as `n` is divided. Since `n` is always
            // non-negative, this means that `curr > 0` so `buf_ptr[curr..curr + 1]`
            // is safe to access.
            unsafe {
                // need at least 16 bits for the 4-characters-at-a-time to work.
                assert!(crate::mem::size_of::<$u>() >= 2);

                // eagerly decode 4 characters at a time
                while n >= 10000 {
                    let rem = (n % 10000) as isize;
                    n /= 10000;

                    // Each two-digit chunk indexes `DEC_DIGITS_LUT` at twice
                    // its value (the table stores the pairs "00".."99").
                    let d1 = (rem / 100) << 1;
                    let d2 = (rem % 100) << 1;
                    curr -= 4;

                    // We are allowed to copy to `buf_ptr[curr..curr + 3]` here since
                    // otherwise `curr < 0`. But then `n` was originally at least `10000^10`
                    // which is `10^40 > 2^128 > n`.
                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
                    ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
                }

                // if we reach here numbers are <= 9999, so at most 4 chars long
                let mut n = n as isize; // possibly reduce 64bit math

                // decode 2 more chars, if > 2 chars
                if n >= 100 {
                    let d1 = (n % 100) << 1;
                    n /= 100;
                    curr -= 2;
                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
                }

                // decode last 1 or 2 chars
                if n < 10 {
                    curr -= 1;
                    *buf_ptr.offset(curr) = (n as u8) + b'0';
                } else {
                    let d1 = n << 1;
                    curr -= 2;
                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
                }
            }

            // SAFETY: `curr` > 0 (since we made `buf` large enough), and all the chars are valid
            // UTF-8 since `DEC_DIGITS_LUT` is
            let buf_slice = unsafe {
                str::from_utf8_unchecked(
                    slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize))
            };
            f.pad_integral(is_nonnegative, "", buf_slice)
        }

        $(#[stable(feature = "rust1", since = "1.0.0")]
        impl fmt::Display for $t {
            #[allow(unused_comparisons)]
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let is_nonnegative = *self >= 0;
                let n = if is_nonnegative {
                    self.$conv_fn()
                } else {
                    // Negate via two's complement (`!x + 1`) to obtain the
                    // magnitude without overflowing on the minimum value.
                    (!self.$conv_fn()).wrapping_add(1)
                };
                $name(n, is_nonnegative, f)
            }
        })*
    };
}
+
macro_rules! impl_Exp {
    ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => {
        // Shared scientific-notation formatter (e.g. "1.5e3"): `n` is the
        // magnitude, `is_nonnegative` the original sign, and `upper`
        // selects 'E' versus 'e'.
        fn $name(
            mut n: $u,
            is_nonnegative: bool,
            upper: bool,
            f: &mut fmt::Formatter<'_>
        ) -> fmt::Result {
            let (mut n, mut exponent, trailing_zeros, added_precision) = {
                let mut exponent = 0;
                // count and remove trailing decimal zeroes
                while n % 10 == 0 && n >= 10 {
                    n /= 10;
                    exponent += 1;
                }
                let trailing_zeros = exponent;

                // `added_precision`: zeros to append when `{:.N}` requests
                // more digits than `n` has; `subtracted_precision`: digits
                // to drop (with rounding) when it requests fewer.
                let (added_precision, subtracted_precision) = match f.precision() {
                    Some(fmt_prec) => {
                        // number of decimal digits minus 1
                        let mut tmp = n;
                        let mut prec = 0;
                        while tmp >= 10 {
                            tmp /= 10;
                            prec += 1;
                        }
                        (fmt_prec.saturating_sub(prec), prec.saturating_sub(fmt_prec))
                    }
                    None => (0, 0)
                };
                // Drop all but the last excess digit outright; the final
                // one is removed below so its value can round the result.
                for _ in 1..subtracted_precision {
                    n /= 10;
                    exponent += 1;
                }
                if subtracted_precision != 0 {
                    let rem = n % 10;
                    n /= 10;
                    exponent += 1;
                    // round up last digit
                    if rem >= 5 {
                        n += 1;
                    }
                }
                (n, exponent, trailing_zeros, added_precision)
            };

            // 39 digits (worst case u128) + . = 40
            // Since `curr` always decreases by the number of digits copied, this means
            // that `curr >= 0`.
            let mut buf = [MaybeUninit::<u8>::uninit(); 40];
            let mut curr = buf.len() as isize; //index for buf
            let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
            let lut_ptr = DEC_DIGITS_LUT.as_ptr();

            // decode 2 chars at a time
            while n >= 100 {
                let d1 = ((n % 100) as isize) << 1;
                curr -= 2;
                // SAFETY: `d1 <= 198`, so we can copy from `lut_ptr[d1..d1 + 2]` since
                // `DEC_DIGITS_LUT` has a length of 200.
                unsafe {
                    ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
                }
                n /= 100;
                exponent += 2;
            }
            // n is <= 99, so at most 2 chars long
            let mut n = n as isize; // possibly reduce 64bit math
            // decode second-to-last character
            if n >= 10 {
                curr -= 1;
                // SAFETY: Safe since `40 > curr >= 0` (see comment)
                unsafe {
                    *buf_ptr.offset(curr) = (n as u8 % 10_u8) + b'0';
                }
                n /= 10;
                exponent += 1;
            }
            // add decimal point iff >1 mantissa digit will be printed
            if exponent != trailing_zeros || added_precision != 0 {
                curr -= 1;
                // SAFETY: Safe since `40 > curr >= 0`
                unsafe {
                    *buf_ptr.offset(curr) = b'.';
                }
            }

            // SAFETY: Safe since `40 > curr >= 0`
            let buf_slice = unsafe {
                // decode last character
                curr -= 1;
                *buf_ptr.offset(curr) = (n as u8) + b'0';

                let len = buf.len() - curr as usize;
                slice::from_raw_parts(buf_ptr.offset(curr), len)
            };

            // stores 'e' (or 'E') and the up to 2-digit exponent
            let mut exp_buf = [MaybeUninit::<u8>::uninit(); 3];
            let exp_ptr = MaybeUninit::slice_as_mut_ptr(&mut exp_buf);
            // SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]`
            // is contained within `exp_buf` since `len <= 3`.
            let exp_slice = unsafe {
                *exp_ptr.offset(0) = if upper { b'E' } else { b'e' };
                let len = if exponent < 10 {
                    *exp_ptr.offset(1) = (exponent as u8) + b'0';
                    2
                } else {
                    // Two-digit exponents come straight from the LUT
                    // (pairs "00".."99" at index `2 * exponent`).
                    let off = exponent << 1;
                    ptr::copy_nonoverlapping(lut_ptr.offset(off), exp_ptr.offset(1), 2);
                    3
                };
                slice::from_raw_parts(exp_ptr, len)
            };

            // Assemble mantissa + padding zeros (for extra requested
            // precision) + exponent, then let the formatter apply
            // sign/width/fill.
            let parts = &[
                flt2dec::Part::Copy(buf_slice),
                flt2dec::Part::Zero(added_precision),
                flt2dec::Part::Copy(exp_slice)
            ];
            let sign = if !is_nonnegative {
                "-"
            } else if f.sign_plus() {
                "+"
            } else {
                ""
            };
            let formatted = flt2dec::Formatted { sign, parts };
            f.pad_formatted_parts(&formatted)
        }

        $(
            #[stable(feature = "integer_exp_format", since = "1.42.0")]
            impl fmt::LowerExp for $t {
                #[allow(unused_comparisons)]
                fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                    let is_nonnegative = *self >= 0;
                    let n = if is_nonnegative {
                        self.$conv_fn()
                    } else {
                        // Negate via two's complement (`!x + 1`) to obtain
                        // the magnitude without overflow on the minimum value.
                        (!self.$conv_fn()).wrapping_add(1)
                    };
                    $name(n, is_nonnegative, false, f)
                }
            })*
        $(
            #[stable(feature = "integer_exp_format", since = "1.42.0")]
            impl fmt::UpperExp for $t {
                #[allow(unused_comparisons)]
                fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                    let is_nonnegative = *self >= 0;
                    let n = if is_nonnegative {
                        self.$conv_fn()
                    } else {
                        // Negate via two's complement (`!x + 1`) to obtain
                        // the magnitude without overflow on the minimum value.
                        (!self.$conv_fn()).wrapping_add(1)
                    };
                    $name(n, is_nonnegative, true, f)
                }
            })*
    };
}
+
// Include wasm32 in here since it doesn't reflect the native pointer size, and
// often cares strongly about getting a smaller code size.
// On these targets every integer up to 64 bits is funneled through a single
// `u64` routine to keep code size down.
#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))]
mod imp {
    use super::*;
    impl_Display!(
        i8, u8, i16, u16, i32, u32, i64, u64, usize, isize
        as u64 via to_u64 named fmt_u64
    );
    impl_Exp!(
        i8, u8, i16, u16, i32, u32, i64, u64, usize, isize
        as u64 via to_u64 named exp_u64
    );
}
+
// On other (32-bit and smaller) targets, integers that fit in 32 bits use
// `u32` arithmetic so 64-bit math is only pulled in where actually needed.
#[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))]
mod imp {
    use super::*;
    impl_Display!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named fmt_u32);
    impl_Display!(i64, u64 as u64 via to_u64 named fmt_u64);
    impl_Exp!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named exp_u32);
    impl_Exp!(i64, u64 as u64 via to_u64 named exp_u64);
}
// 128-bit exponential formatting is the same on every target.
impl_Exp!(i128, u128 as u128 via to_u128 named exp_u128);
+
/// Helper function for writing a u64 into `buf` going from last to first, with `curr`.
///
/// Writes the decimal digits of `n` ending just before the old `*curr` and
/// leaves `*curr` pointing at the first digit written. Callers in this file
/// pass remainders of `udiv_1e19`, which are below 10^19 (at most 19 digits).
fn parse_u64_into<const N: usize>(mut n: u64, buf: &mut [MaybeUninit<u8>; N], curr: &mut isize) {
    let buf_ptr = MaybeUninit::slice_as_mut_ptr(buf);
    let lut_ptr = DEC_DIGITS_LUT.as_ptr();
    assert!(*curr > 19);

    // SAFETY:
    // Writes at most 19 characters into the buffer. Guaranteed that any ptr into LUT is at most
    // 198, so will never OOB. There is a check above that there are at least 19 characters
    // remaining.
    unsafe {
        if n >= 1e16 as u64 {
            let to_parse = n % 1e16 as u64;
            n /= 1e16 as u64;

            // Some of these are nops but it looks more elegant this way.
            // Each `d*` is a two-digit chunk, doubled to index the LUT pairs.
            let d1 = ((to_parse / 1e14 as u64) % 100) << 1;
            let d2 = ((to_parse / 1e12 as u64) % 100) << 1;
            let d3 = ((to_parse / 1e10 as u64) % 100) << 1;
            let d4 = ((to_parse / 1e8 as u64) % 100) << 1;
            let d5 = ((to_parse / 1e6 as u64) % 100) << 1;
            let d6 = ((to_parse / 1e4 as u64) % 100) << 1;
            let d7 = ((to_parse / 1e2 as u64) % 100) << 1;
            let d8 = ((to_parse / 1e0 as u64) % 100) << 1;

            *curr -= 16;

            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d5 as isize), buf_ptr.offset(*curr + 8), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d6 as isize), buf_ptr.offset(*curr + 10), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d7 as isize), buf_ptr.offset(*curr + 12), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d8 as isize), buf_ptr.offset(*curr + 14), 2);
        }
        if n >= 1e8 as u64 {
            let to_parse = n % 1e8 as u64;
            n /= 1e8 as u64;

            // Some of these are nops but it looks more elegant this way.
            let d1 = ((to_parse / 1e6 as u64) % 100) << 1;
            let d2 = ((to_parse / 1e4 as u64) % 100) << 1;
            let d3 = ((to_parse / 1e2 as u64) % 100) << 1;
            let d4 = ((to_parse / 1e0 as u64) % 100) << 1;
            *curr -= 8;

            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d3 as isize), buf_ptr.offset(*curr + 4), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d4 as isize), buf_ptr.offset(*curr + 6), 2);
        }
        // `n` < 1e8 < (1 << 32)
        let mut n = n as u32;
        if n >= 1e4 as u32 {
            let to_parse = n % 1e4 as u32;
            n /= 1e4 as u32;

            let d1 = (to_parse / 100) << 1;
            let d2 = (to_parse % 100) << 1;
            *curr -= 4;

            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr + 0), 2);
            ptr::copy_nonoverlapping(lut_ptr.offset(d2 as isize), buf_ptr.offset(*curr + 2), 2);
        }

        // `n` < 1e4 < (1 << 16)
        let mut n = n as u16;
        if n >= 100 {
            let d1 = (n % 100) << 1;
            n /= 100;
            *curr -= 2;
            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
        }

        // decode last 1 or 2 chars
        if n < 10 {
            *curr -= 1;
            *buf_ptr.offset(*curr) = (n as u8) + b'0';
        } else {
            let d1 = n << 1;
            *curr -= 2;
            ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(*curr), 2);
        }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for u128 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `u128` bypasses the `impl_Display!` machinery and uses the
        // specialized split-into-u64s path (`fmt_u128`) below.
        fmt_u128(*self, true, f)
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for i128 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let is_nonnegative = *self >= 0;
        let n = if is_nonnegative {
            self.to_u128()
        } else {
            // Negate via two's complement (`!x + 1`) to obtain the
            // magnitude without overflowing on `i128::MIN`.
            (!self.to_u128()).wrapping_add(1)
        };
        fmt_u128(n, is_nonnegative, f)
    }
}
+
/// Specialized optimization for u128. Instead of taking two items at a time, it splits
/// into at most 2 u64s, and then chunks by 10e16, 10e8, 10e4, 10e2, and then 10e1.
/// It also has to handle 1 last item, as 10^40 > 2^128 > 10^39, whereas
/// 10^20 > 2^64 > 10^19.
fn fmt_u128(n: u128, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // 2^128 is about 3*10^38, so 39 gives an extra byte of space
    let mut buf = [MaybeUninit::<u8>::uninit(); 39];
    let mut curr = buf.len() as isize;

    // Peel off the lowest 19 decimal digits (one u64's worth).
    let (n, rem) = udiv_1e19(n);
    parse_u64_into(rem, &mut buf, &mut curr);

    if n != 0 {
        // 0 pad up to point
        let target = (buf.len() - 19) as isize;
        // SAFETY: Guaranteed that we wrote at most 19 bytes, and there must be space
        // remaining since it has length 39
        unsafe {
            ptr::write_bytes(
                MaybeUninit::slice_as_mut_ptr(&mut buf).offset(target),
                b'0',
                (curr - target) as usize,
            );
        }
        curr = target;

        // Peel off the next 19 digits.
        let (n, rem) = udiv_1e19(n);
        parse_u64_into(rem, &mut buf, &mut curr);
        // Should this following branch be annotated with unlikely?
        if n != 0 {
            let target = (buf.len() - 38) as isize;
            // The raw `buf_ptr` pointer is only valid until `buf` is used the next time,
            // but `buf` is not used in this scope so we are good.
            let buf_ptr = MaybeUninit::slice_as_mut_ptr(&mut buf);
            // SAFETY: At this point we wrote at most 38 bytes, pad up to that point,
            // There can only be at most 1 digit remaining.
            unsafe {
                ptr::write_bytes(buf_ptr.offset(target), b'0', (curr - target) as usize);
                curr = target - 1;
                *buf_ptr.offset(curr) = (n as u8) + b'0';
            }
        }
    }

    // SAFETY: `curr` > 0 (since we made `buf` large enough), and all the chars are valid
    // UTF-8 since `DEC_DIGITS_LUT` is
    let buf_slice = unsafe {
        str::from_utf8_unchecked(slice::from_raw_parts(
            MaybeUninit::slice_as_mut_ptr(&mut buf).offset(curr),
            buf.len() - curr as usize,
        ))
    };
    f.pad_integral(is_nonnegative, "", buf_slice)
}
+
/// Divides `n` by 10^19, returning the quotient and the remainder.
///
/// The remainder is always below 10^19, so it fits in a `u64`. When the
/// value fits in 64 bits the native division is used; otherwise a
/// shift-subtract (restoring) long division runs, one quotient bit per step,
/// instead of the generic 128-bit division.
fn udiv_1e19(n: u128) -> (u128, u64) {
    const DIV: u64 = 1e19 as u64;
    const DIV_WIDE: u128 = DIV as u128;

    // Fast path: everything fits in a u64.
    let hi = (n >> 64) as u64;
    if hi == 0 {
        let lo = n as u64;
        return ((lo / DIV) as u128, lo % DIV);
    }

    // `steps` is the number of dividend bits still to be brought down.
    let steps = 65 - hi.leading_zeros();
    // Quotient bits are shifted in at the bottom of `quot` while the
    // not-yet-processed dividend bits sit at its top.
    let mut quot = n << (128 - steps);
    let mut rem = n >> steps;
    let mut carry: u64 = 0;

    for _ in 0..steps {
        // Bring down the next dividend bit and shift in the previous
        // quotient bit.
        rem = (rem << 1) | (quot >> 127);
        quot = (quot << 1) | carry as u128;

        // If the partial remainder reaches the divisor, subtract it and
        // record a 1 quotient bit (explicit-branch form of the original
        // branchless sign-mask trick).
        if rem >= DIV_WIDE {
            rem -= DIV_WIDE;
            carry = 1;
        } else {
            carry = 0;
        }
    }
    ((quot << 1) | carry as u128, rem as u64)
}
--- /dev/null
+//! This is an internal module used by the ifmt! runtime. These structures are
+//! emitted to static arrays to precompile format strings ahead of time.
+//!
+//! These definitions are similar to their `ct` equivalents, but differ in that
+//! these can be statically allocated and are slightly optimized for the runtime
+#![allow(missing_debug_implementations)]
+
/// One pre-compiled formatting directive: which argument it formats
/// (`position`) and how (`format`).
#[derive(Copy, Clone)]
pub struct Argument {
    // Index of the argument to format.
    pub position: usize,
    pub format: FormatSpec,
}
+
/// The runtime representation of a parsed format spec:
/// fill character, alignment, flags, precision, and width.
#[derive(Copy, Clone)]
pub struct FormatSpec {
    pub fill: char,
    pub align: Alignment,
    // Packed boolean flags; bit meanings are defined by the consumer of
    // this struct.
    pub flags: u32,
    pub precision: Count,
    pub width: Count,
}
+
/// Possible alignments that can be requested as part of a formatting directive.
///
/// `Unknown` means the format string did not request any alignment.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Alignment {
    /// Indication that contents should be left-aligned.
    Left,
    /// Indication that contents should be right-aligned.
    Right,
    /// Indication that contents should be center-aligned.
    Center,
    /// No alignment was requested.
    Unknown,
}
+
/// Used by [width](https://doc.rust-lang.org/std/fmt/#width) and [precision](https://doc.rust-lang.org/std/fmt/#precision) specifiers.
///
/// A count is either a literal value, a reference to a runtime argument
/// (the `$`/`*` forms), or absent.
#[derive(Copy, Clone)]
pub enum Count {
    /// Specified with a literal number, stores the value
    Is(usize),
    /// Specified using `$` and `*` syntaxes, stores the index into `args`
    Param(usize),
    /// Not specified
    Implied,
}
--- /dev/null
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::marker::Unpin;
+use crate::ops;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
/// A future represents an asynchronous computation.
///
/// A future is a value that may not have finished computing yet. This kind of
/// "asynchronous value" makes it possible for a thread to continue doing useful
/// work while it waits for the value to become available.
///
/// # The `poll` method
///
/// The core method of future, `poll`, *attempts* to resolve the future into a
/// final value. This method does not block if the value is not ready. Instead,
/// the current task is scheduled to be woken up when it's possible to make
/// further progress by `poll`ing again. The `context` passed to the `poll`
/// method can provide a [`Waker`], which is a handle for waking up the current
/// task.
///
/// When using a future, you generally won't call `poll` directly, but instead
/// `.await` the value.
///
/// [`Waker`]: crate::task::Waker
#[doc(spotlight)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[stable(feature = "futures_api", since = "1.36.0")]
// Registered as a lang item so the compiler can refer to this trait when
// lowering `async`/`.await` code.
#[lang = "future_trait"]
#[rustc_on_unimplemented(label = "`{Self}` is not a future", message = "`{Self}` is not a future")]
pub trait Future {
    /// The type of value produced on completion.
    #[stable(feature = "futures_api", since = "1.36.0")]
    type Output;

    /// Attempt to resolve the future to a final value, registering
    /// the current task for wakeup if the value is not yet available.
    ///
    /// # Return value
    ///
    /// This function returns:
    ///
    /// - [`Poll::Pending`] if the future is not ready yet
    /// - [`Poll::Ready(val)`] with the result `val` of this future if it
    ///   finished successfully.
    ///
    /// Once a future has finished, clients should not `poll` it again.
    ///
    /// When a future is not ready yet, `poll` returns `Poll::Pending` and
    /// stores a clone of the [`Waker`] copied from the current [`Context`].
    /// This [`Waker`] is then woken once the future can make progress.
    /// For example, a future waiting for a socket to become
    /// readable would call `.clone()` on the [`Waker`] and store it.
    /// When a signal arrives elsewhere indicating that the socket is readable,
    /// [`Waker::wake`] is called and the socket future's task is awoken.
    /// Once a task has been woken up, it should attempt to `poll` the future
    /// again, which may or may not produce a final value.
    ///
    /// Note that on multiple calls to `poll`, only the [`Waker`] from the
    /// [`Context`] passed to the most recent call should be scheduled to
    /// receive a wakeup.
    ///
    /// # Runtime characteristics
    ///
    /// Futures alone are *inert*; they must be *actively* `poll`ed to make
    /// progress, meaning that each time the current task is woken up, it should
    /// actively re-`poll` pending futures that it still has an interest in.
    ///
    /// The `poll` function is not called repeatedly in a tight loop -- instead,
    /// it should only be called when the future indicates that it is ready to
    /// make progress (by calling `wake()`). If you're familiar with the
    /// `poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures
    /// typically do *not* suffer the same problems of "all wakeups must poll
    /// all events"; they are more like `epoll(4)`.
    ///
    /// An implementation of `poll` should strive to return quickly, and should
    /// not block. Returning quickly prevents unnecessarily clogging up
    /// threads or event loops. If it is known ahead of time that a call to
    /// `poll` may end up taking awhile, the work should be offloaded to a
    /// thread pool (or something similar) to ensure that `poll` can return
    /// quickly.
    ///
    /// # Panics
    ///
    /// Once a future has completed (returned `Ready` from `poll`), calling its
    /// `poll` method again may panic, block forever, or cause other kinds of
    /// problems; the `Future` trait places no requirements on the effects of
    /// such a call. However, as the `poll` method is not marked `unsafe`,
    /// Rust's usual rules apply: calls must never cause undefined behavior
    /// (memory corruption, incorrect use of `unsafe` functions, or the like),
    /// regardless of the future's state.
    ///
    /// [`Poll::Ready(val)`]: Poll::Ready
    /// [`Waker`]: crate::task::Waker
    /// [`Waker::wake`]: crate::task::Waker::wake
    // Lang item: the compiler calls this method when lowering `.await`.
    #[lang = "poll"]
    #[stable(feature = "futures_api", since = "1.36.0")]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output>;
}
+
// Forwarding impl: a `&mut F` is itself a future when `F: Future + Unpin`.
// The `Unpin` bound is what makes `Pin::new` on the reborrow legal.
#[stable(feature = "futures_api", since = "1.36.0")]
impl<F: ?Sized + Future + Unpin> Future for &mut F {
    type Output = F::Output;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        F::poll(Pin::new(&mut **self), cx)
    }
}
+
// Forwarding impl: a `Pin` of an `Unpin` pointer whose target is a future is
// itself a future; polling re-pins the pointee via `as_mut` and delegates.
#[stable(feature = "futures_api", since = "1.36.0")]
impl<P> Future for Pin<P>
where
    P: Unpin + ops::DerefMut<Target: Future>,
{
    type Output = <<P as ops::Deref>::Target as Future>::Output;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        Pin::get_mut(self).as_mut().poll(cx)
    }
}
--- /dev/null
+use crate::future::Future;
+
/// Conversion into a `Future`.
///
/// Types implementing this trait can be turned into a future whose output is
/// `Self::Output` by calling [`IntoFuture::into_future`].
#[unstable(feature = "into_future", issue = "67644")]
pub trait IntoFuture {
    /// The output that the future will produce on completion.
    #[unstable(feature = "into_future", issue = "67644")]
    type Output;

    /// Which kind of future are we turning this into?
    #[unstable(feature = "into_future", issue = "67644")]
    type Future: Future<Output = Self::Output>;

    /// Creates a future from a value.
    #[unstable(feature = "into_future", issue = "67644")]
    fn into_future(self) -> Self::Future;
}
+
// Blanket impl: every future trivially converts into itself.
#[unstable(feature = "into_future", issue = "67644")]
impl<F: Future> IntoFuture for F {
    type Output = F::Output;
    type Future = F;

    fn into_future(self) -> Self::Future {
        self
    }
}
--- /dev/null
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+//! Asynchronous values.
+
+use crate::{
+ ops::{Generator, GeneratorState},
+ pin::Pin,
+ ptr::NonNull,
+ task::{Context, Poll},
+};
+
+mod future;
+mod into_future;
+mod pending;
+mod poll_fn;
+mod ready;
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::future::Future;
+
+#[unstable(feature = "into_future", issue = "67644")]
+pub use into_future::IntoFuture;
+
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub use pending::{pending, Pending};
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub use ready::{ready, Ready};
+
+#[unstable(feature = "future_poll_fn", issue = "72302")]
+pub use poll_fn::{poll_fn, PollFn};
+
/// This type is needed because:
///
/// a) Generators cannot implement `for<'a, 'b> Generator<&'a mut Context<'b>>`, so we need to pass
///    a raw pointer (see <https://github.com/rust-lang/rust/issues/68923>).
/// b) Raw pointers and `NonNull` aren't `Send` or `Sync`, so that would make every single future
///    non-Send/Sync as well, and we don't want that.
///
/// It also simplifies the HIR lowering of `.await`.
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[derive(Debug, Copy, Clone)]
pub struct ResumeTy(NonNull<Context<'static>>);

// SAFETY (for both impls): see point (b) above — the pointer is never
// dereferenced through these impls themselves; every actual access goes
// through `get_context`, whose caller contract covers validity.
#[unstable(feature = "gen_future", issue = "50547")]
unsafe impl Send for ResumeTy {}

#[unstable(feature = "gen_future", issue = "50547")]
unsafe impl Sync for ResumeTy {}
+
/// Wrap a generator in a future.
///
/// This function returns a `GenFuture` underneath, but hides it in `impl Trait` to give
/// better error messages (`impl Future` rather than `GenFuture<[closure.....]>`).
// This is `const` to avoid extra errors after we recover from `const async fn`
#[lang = "from_generator"]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[rustc_const_unstable(feature = "gen_future", issue = "50547")]
#[inline]
pub const fn from_generator<T>(gen: T) -> impl Future<Output = T::Return>
where
    T: Generator<ResumeTy, Yield = ()>,
{
    #[rustc_diagnostic_item = "gen_future"]
    struct GenFuture<T: Generator<ResumeTy, Yield = ()>>(T);

    // We rely on the fact that async/await futures are immovable in order to create
    // self-referential borrows in the underlying generator.
    impl<T: Generator<ResumeTy, Yield = ()>> !Unpin for GenFuture<T> {}

    impl<T: Generator<ResumeTy, Yield = ()>> Future for GenFuture<T> {
        type Output = T::Return;
        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            // SAFETY: Safe because we're !Unpin + !Drop, and this is just a field projection.
            let gen = unsafe { Pin::map_unchecked_mut(self, |s| &mut s.0) };

            // Resume the generator, turning the `&mut Context` into a `NonNull` raw pointer. The
            // `.await` lowering will safely cast that back to a `&mut Context`.
            match gen.resume(ResumeTy(NonNull::from(cx).cast::<Context<'static>>())) {
                // A yield means the `.await` point inside returned `Pending`.
                GeneratorState::Yielded(()) => Poll::Pending,
                GeneratorState::Complete(x) => Poll::Ready(x),
            }
        }
    }

    GenFuture(gen)
}
+
/// Turns the `ResumeTy` passed into a generator resume back into the
/// `&mut Context` it was created from (see the `resume` call in
/// `from_generator`).
///
/// # Safety
///
/// `cx.0` must be a valid pointer fulfilling all the requirements of a
/// mutable reference for the chosen lifetimes.
#[lang = "get_context"]
#[doc(hidden)]
#[unstable(feature = "gen_future", issue = "50547")]
#[inline]
pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
    // SAFETY: the caller must guarantee that `cx.0` is a valid pointer
    // that fulfills all the requirements for a mutable reference.
    unsafe { &mut *cx.0.as_ptr().cast() }
}
--- /dev/null
+use crate::fmt::{self, Debug};
+use crate::future::Future;
+use crate::marker;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
/// Creates a future which never resolves, representing a computation that never
/// finishes.
///
/// This `struct` is created by [`pending()`]. See its
/// documentation for more.
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Pending<T> {
    // Zero-sized marker so the future is still generic over its
    // (never-produced) output type `T`.
    _data: marker::PhantomData<T>,
}
+
/// Creates a future which never resolves, representing a computation that never
/// finishes.
///
/// # Examples
///
/// ```no_run
/// use core::future;
///
/// # async fn run() {
/// let future = future::pending();
/// let () = future.await;
/// unreachable!();
/// # }
/// ```
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
pub fn pending<T>() -> Pending<T> {
    // `T` is inferred from the use site via the future's `Output` type.
    Pending { _data: marker::PhantomData }
}
+
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Future for Pending<T> {
    type Output = T;

    // Always reports `Pending` and ignores the context, so this future never
    // wakes the task and never completes.
    fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<T> {
        Poll::Pending
    }
}

// `Pending` stores no data (just `PhantomData`), so it can always be moved
// freely after being pinned.
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Unpin for Pending<T> {}

// Manual impl: prints no fields and therefore requires no `T: Debug` bound.
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Debug for Pending<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Pending").finish()
    }
}

// Manual impl: there is no data to copy, so no `T: Clone` bound is needed.
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Clone for Pending<T> {
    fn clone(&self) -> Self {
        pending()
    }
}
--- /dev/null
+use crate::fmt;
+use crate::future::Future;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
/// Creates a future that wraps a function returning `Poll`.
///
/// Polling the future delegates to the wrapped function.
///
/// # Examples
///
/// ```
/// #![feature(future_poll_fn)]
/// # async fn run() {
/// use core::future::poll_fn;
/// use core::task::{Context, Poll};
///
/// fn read_line(_cx: &mut Context<'_>) -> Poll<String> {
///     Poll::Ready("Hello, World!".into())
/// }
///
/// let read_future = poll_fn(read_line);
/// assert_eq!(read_future.await, "Hello, World!".to_owned());
/// # };
/// ```
#[unstable(feature = "future_poll_fn", issue = "72302")]
pub fn poll_fn<T, F>(f: F) -> PollFn<F>
where
    F: FnMut(&mut Context<'_>) -> Poll<T>,
{
    PollFn { f }
}
+
/// A Future that wraps a function returning `Poll`.
///
/// This `struct` is created by [`poll_fn()`]. See its
/// documentation for more.
#[must_use = "futures do nothing unless you `.await` or poll them"]
#[unstable(feature = "future_poll_fn", issue = "72302")]
pub struct PollFn<F> {
    // The closure invoked on every `poll`.
    f: F,
}

// Unconditionally `Unpin`: `poll` only needs `&mut self.f`, so the future
// never relies on a stable address.
#[unstable(feature = "future_poll_fn", issue = "72302")]
impl<F> Unpin for PollFn<F> {}

// Manual impl printing no fields, since `F` need not be `Debug`.
#[unstable(feature = "future_poll_fn", issue = "72302")]
impl<F> fmt::Debug for PollFn<F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("PollFn").finish()
    }
}
+
#[unstable(feature = "future_poll_fn", issue = "72302")]
impl<T, F> Future for PollFn<F>
where
    F: FnMut(&mut Context<'_>) -> Poll<T>,
{
    type Output = T;

    // Each poll simply delegates to the stored closure; no pin projection is
    // needed because `PollFn` is `Unpin`.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
        (&mut self.f)(cx)
    }
}
--- /dev/null
+use crate::future::Future;
+use crate::pin::Pin;
+use crate::task::{Context, Poll};
+
/// Creates a future that is immediately ready with a value.
///
/// This `struct` is created by [`ready()`]. See its
/// documentation for more.
#[stable(feature = "future_readiness_fns", since = "1.48.0")]
#[derive(Debug, Clone)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
// The `Option` lets `poll` move the value out exactly once.
pub struct Ready<T>(Option<T>);

#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Unpin for Ready<T> {}

#[stable(feature = "future_readiness_fns", since = "1.48.0")]
impl<T> Future for Ready<T> {
    type Output = T;

    // Completes on the first poll by taking the stored value; polling again
    // panics via the `expect` (the `Future` contract permits this for a
    // finished future).
    #[inline]
    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<T> {
        Poll::Ready(self.0.take().expect("Ready polled after completion"))
    }
}
+
+/// Creates a future that is immediately ready with a value.
+///
+/// Futures created through this function are functionally similar to those
+/// created through `async {}`. The main difference is that futures created
+/// through this function are named and implement `Unpin`.
+///
+/// # Examples
+///
+/// ```
+/// use core::future;
+///
+/// # async fn run() {
+/// let a = future::ready(1);
+/// assert_eq!(a.await, 1);
+/// # }
+/// ```
+#[stable(feature = "future_readiness_fns", since = "1.48.0")]
+pub fn ready<T>(t: T) -> Ready<T> {
+    // Stash the value in `Some`; `Ready`'s poll moves it out on first poll.
+    Ready(Some(t))
+}
--- /dev/null
+//! Generic hashing support.
+//!
+//! This module provides a generic way to compute the hash of a value. The
+//! simplest way to make a type hashable is to use `#[derive(Hash)]`:
+//!
+//! # Examples
+//!
+//! ```rust
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
+//!
+//! #[derive(Hash)]
+//! struct Person {
+//! id: u32,
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+//!
+//! If you need more control over how a value is hashed, you need to implement
+//! the [`Hash`] trait:
+//!
+//! ```rust
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
+//!
+//! struct Person {
+//! id: u32,
+//! # #[allow(dead_code)]
+//! name: String,
+//! phone: u64,
+//! }
+//!
+//! impl Hash for Person {
+//! fn hash<H: Hasher>(&self, state: &mut H) {
+//! self.id.hash(state);
+//! self.phone.hash(state);
+//! }
+//! }
+//!
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
+//!
+//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
+//!
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
+//! t.hash(&mut s);
+//! s.finish()
+//! }
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::fmt;
+use crate::marker;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+pub use self::sip::SipHasher;
+
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[allow(deprecated)]
+#[doc(hidden)]
+pub use self::sip::SipHasher13;
+
+mod sip;
+
+/// A hashable type.
+///
+/// Types implementing `Hash` are able to be [`hash`]ed with an instance of
+/// [`Hasher`].
+///
+/// ## Implementing `Hash`
+///
+/// You can derive `Hash` with `#[derive(Hash)]` if all fields implement `Hash`.
+/// The resulting hash will be the combination of the values from calling
+/// [`hash`] on each field.
+///
+/// ```
+/// #[derive(Hash)]
+/// struct Rustacean {
+///     name: String,
+///     country: String,
+/// }
+/// ```
+///
+/// If you need more control over how a value is hashed, you can of course
+/// implement the `Hash` trait yourself:
+///
+/// ```
+/// use std::hash::{Hash, Hasher};
+///
+/// struct Person {
+///     id: u32,
+///     name: String,
+///     phone: u64,
+/// }
+///
+/// impl Hash for Person {
+///     fn hash<H: Hasher>(&self, state: &mut H) {
+///         self.id.hash(state);
+///         self.phone.hash(state);
+///     }
+/// }
+/// ```
+///
+/// ## `Hash` and `Eq`
+///
+/// When implementing both `Hash` and [`Eq`], it is important that the following
+/// property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must also be equal.
+/// [`HashMap`] and [`HashSet`] both rely on this behavior.
+///
+/// Thankfully, you won't need to worry about upholding this property when
+/// deriving both [`Eq`] and `Hash` with `#[derive(PartialEq, Eq, Hash)]`.
+///
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [`hash`]: Hash::hash
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Hash {
+    /// Feeds this value into the given [`Hasher`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::hash_map::DefaultHasher;
+    /// use std::hash::{Hash, Hasher};
+    ///
+    /// let mut hasher = DefaultHasher::new();
+    /// 7920.hash(&mut hasher);
+    /// println!("Hash is {:x}!", hasher.finish());
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn hash<H: Hasher>(&self, state: &mut H);
+
+    /// Feeds a slice of this type into the given [`Hasher`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::hash_map::DefaultHasher;
+    /// use std::hash::{Hash, Hasher};
+    ///
+    /// let mut hasher = DefaultHasher::new();
+    /// let numbers = [6, 28, 496, 8128];
+    /// Hash::hash_slice(&numbers, &mut hasher);
+    /// println!("Hash is {:x}!", hasher.finish());
+    /// ```
+    #[stable(feature = "hash_slice", since = "1.3.0")]
+    fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
+    where
+        Self: Sized,
+    {
+        // Note: only the elements are hashed; no length prefix is written
+        // here. The `Hash` impl for `[T]` hashes the length itself before
+        // delegating to this method.
+        for piece in data {
+            piece.hash(state);
+        }
+    }
+}
+
+// Separate module to reexport the macro `Hash` from prelude without the trait `Hash`.
+pub(crate) mod macros {
+    /// Derive macro generating an impl of the trait `Hash`.
+    // The expansion is produced by the compiler itself, hence the empty body.
+    #[rustc_builtin_macro]
+    #[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+    #[allow_internal_unstable(core_intrinsics)]
+    pub macro Hash($item:item) {
+        /* compiler built-in */
+    }
+}
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(inline)]
+pub use macros::Hash;
+
+/// A trait for hashing an arbitrary stream of bytes.
+///
+/// Instances of `Hasher` usually represent state that is changed while hashing
+/// data.
+///
+/// `Hasher` provides a fairly basic interface for retrieving the generated hash
+/// (with [`finish`]), and writing integers as well as slices of bytes into an
+/// instance (with [`write`] and [`write_u8`] etc.). Most of the time, `Hasher`
+/// instances are used in conjunction with the [`Hash`] trait.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::DefaultHasher;
+/// use std::hash::Hasher;
+///
+/// let mut hasher = DefaultHasher::new();
+///
+/// hasher.write_u32(1989);
+/// hasher.write_u8(11);
+/// hasher.write_u8(9);
+/// hasher.write(b"Huh?");
+///
+/// println!("Hash is {:x}!", hasher.finish());
+/// ```
+///
+/// [`finish`]: Hasher::finish
+/// [`write`]: Hasher::write
+/// [`write_u8`]: Hasher::write_u8
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Hasher {
+    /// Returns the hash value for the values written so far.
+    ///
+    /// Despite its name, the method does not reset the hasher’s internal
+    /// state. Additional [`write`]s will continue from the current value.
+    /// If you need to start a fresh hash value, you will have to create
+    /// a new hasher.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::hash_map::DefaultHasher;
+    /// use std::hash::Hasher;
+    ///
+    /// let mut hasher = DefaultHasher::new();
+    /// hasher.write(b"Cool!");
+    ///
+    /// println!("Hash is {:x}!", hasher.finish());
+    /// ```
+    ///
+    /// [`write`]: Hasher::write
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn finish(&self) -> u64;
+
+    /// Writes some data into this `Hasher`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::hash_map::DefaultHasher;
+    /// use std::hash::Hasher;
+    ///
+    /// let mut hasher = DefaultHasher::new();
+    /// let data = [0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef];
+    ///
+    /// hasher.write(&data);
+    ///
+    /// println!("Hash is {:x}!", hasher.finish());
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn write(&mut self, bytes: &[u8]);
+
+    // The default integer methods below feed the value's *native-endian*
+    // bytes (`to_ne_bytes`) into `write`, so resulting hash values are not
+    // portable across platforms of different endianness.
+
+    /// Writes a single `u8` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_u8(&mut self, i: u8) {
+        self.write(&[i])
+    }
+    /// Writes a single `u16` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_u16(&mut self, i: u16) {
+        self.write(&i.to_ne_bytes())
+    }
+    /// Writes a single `u32` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_u32(&mut self, i: u32) {
+        self.write(&i.to_ne_bytes())
+    }
+    /// Writes a single `u64` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_u64(&mut self, i: u64) {
+        self.write(&i.to_ne_bytes())
+    }
+    /// Writes a single `u128` into this hasher.
+    #[inline]
+    #[stable(feature = "i128", since = "1.26.0")]
+    fn write_u128(&mut self, i: u128) {
+        self.write(&i.to_ne_bytes())
+    }
+    /// Writes a single `usize` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_usize(&mut self, i: usize) {
+        self.write(&i.to_ne_bytes())
+    }
+
+    // Signed variants reinterpret the bits as unsigned and reuse the
+    // corresponding unsigned method.
+
+    /// Writes a single `i8` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_i8(&mut self, i: i8) {
+        self.write_u8(i as u8)
+    }
+    /// Writes a single `i16` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_i16(&mut self, i: i16) {
+        self.write_u16(i as u16)
+    }
+    /// Writes a single `i32` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_i32(&mut self, i: i32) {
+        self.write_u32(i as u32)
+    }
+    /// Writes a single `i64` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_i64(&mut self, i: i64) {
+        self.write_u64(i as u64)
+    }
+    /// Writes a single `i128` into this hasher.
+    #[inline]
+    #[stable(feature = "i128", since = "1.26.0")]
+    fn write_i128(&mut self, i: i128) {
+        self.write_u128(i as u128)
+    }
+    /// Writes a single `isize` into this hasher.
+    #[inline]
+    #[stable(feature = "hasher_write", since = "1.3.0")]
+    fn write_isize(&mut self, i: isize) {
+        self.write_usize(i as usize)
+    }
+}
+
+#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<H: Hasher + ?Sized> Hasher for &mut H {
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+}
+
+/// A trait for creating instances of [`Hasher`].
+///
+/// A `BuildHasher` is typically used (e.g., by [`HashMap`]) to create
+/// [`Hasher`]s for each key such that they are hashed independently of one
+/// another, since [`Hasher`]s contain state.
+///
+/// For each instance of `BuildHasher`, the [`Hasher`]s created by
+/// [`build_hasher`] should be identical. That is, if the same stream of bytes
+/// is fed into each hasher, the same output will also be generated.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::RandomState;
+/// use std::hash::{BuildHasher, Hasher};
+///
+/// let s = RandomState::new();
+/// let mut hasher_1 = s.build_hasher();
+/// let mut hasher_2 = s.build_hasher();
+///
+/// hasher_1.write_u32(8128);
+/// hasher_2.write_u32(8128);
+///
+/// assert_eq!(hasher_1.finish(), hasher_2.finish());
+/// ```
+///
+/// [`build_hasher`]: BuildHasher::build_hasher
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+#[stable(since = "1.7.0", feature = "build_hasher")]
+pub trait BuildHasher {
+    /// Type of the hasher that will be created.
+    #[stable(since = "1.7.0", feature = "build_hasher")]
+    type Hasher: Hasher;
+
+    /// Creates a new hasher.
+    ///
+    /// Each call to `build_hasher` on the same instance should produce identical
+    /// [`Hasher`]s.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::hash_map::RandomState;
+    /// use std::hash::BuildHasher;
+    ///
+    /// let s = RandomState::new();
+    /// let new_s = s.build_hasher();
+    /// ```
+    #[stable(since = "1.7.0", feature = "build_hasher")]
+    fn build_hasher(&self) -> Self::Hasher;
+}
+
+/// Used to create a default [`BuildHasher`] instance for types that implement
+/// [`Hasher`] and [`Default`].
+///
+/// `BuildHasherDefault<H>` can be used when a type `H` implements [`Hasher`] and
+/// [`Default`], and you need a corresponding [`BuildHasher`] instance, but none is
+/// defined.
+///
+/// Any `BuildHasherDefault` is [zero-sized]. It can be created with
+/// [`default`][method.default]. When using `BuildHasherDefault` with [`HashMap`] or
+/// [`HashSet`], this doesn't need to be done, since they implement appropriate
+/// [`Default`] instances themselves.
+///
+/// # Examples
+///
+/// Using `BuildHasherDefault` to specify a custom [`BuildHasher`] for
+/// [`HashMap`]:
+///
+/// ```
+/// use std::collections::HashMap;
+/// use std::hash::{BuildHasherDefault, Hasher};
+///
+/// #[derive(Default)]
+/// struct MyHasher;
+///
+/// impl Hasher for MyHasher {
+///     fn write(&mut self, bytes: &[u8]) {
+///         // Your hashing algorithm goes here!
+///         unimplemented!()
+///     }
+///
+///     fn finish(&self) -> u64 {
+///         // Your hashing algorithm goes here!
+///         unimplemented!()
+///     }
+/// }
+///
+/// type MyBuildHasher = BuildHasherDefault<MyHasher>;
+///
+/// let hash_map = HashMap::<u32, u32, MyBuildHasher>::default();
+/// ```
+///
+/// [method.default]: BuildHasherDefault::default
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+/// [`HashSet`]: ../../std/collections/struct.HashSet.html
+/// [zero-sized]: https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts
+#[stable(since = "1.7.0", feature = "build_hasher")]
+// `PhantomData<H>` keeps the builder zero-sized while tying it to `H`.
+pub struct BuildHasherDefault<H>(marker::PhantomData<H>);
+
+#[stable(since = "1.9.0", feature = "core_impl_debug")]
+impl<H> fmt::Debug for BuildHasherDefault<H> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("BuildHasherDefault")
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+impl<H: Default + Hasher> BuildHasher for BuildHasherDefault<H> {
+ type Hasher = H;
+
+ fn build_hasher(&self) -> H {
+ H::default()
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+impl<H> Clone for BuildHasherDefault<H> {
+ fn clone(&self) -> BuildHasherDefault<H> {
+ BuildHasherDefault(marker::PhantomData)
+ }
+}
+
+#[stable(since = "1.7.0", feature = "build_hasher")]
+impl<H> Default for BuildHasherDefault<H> {
+ fn default() -> BuildHasherDefault<H> {
+ BuildHasherDefault(marker::PhantomData)
+ }
+}
+
+#[stable(since = "1.29.0", feature = "build_hasher_eq")]
+impl<H> PartialEq for BuildHasherDefault<H> {
+ fn eq(&self, _other: &BuildHasherDefault<H>) -> bool {
+ true
+ }
+}
+
+#[stable(since = "1.29.0", feature = "build_hasher_eq")]
+impl<H> Eq for BuildHasherDefault<H> {}
+
+// `Hash` implementations for primitives, tuples, slices, references,
+// and raw pointers.
+mod impls {
+    use crate::mem;
+    use crate::slice;
+
+    use super::*;
+
+    // Implements `Hash` for an integer primitive by forwarding single values
+    // to the matching `Hasher::write_*` method, and whole slices as one raw
+    // native-endian byte buffer (a single `write` call instead of one per
+    // element).
+    macro_rules! impl_write {
+        ($(($ty:ident, $meth:ident),)*) => {$(
+            #[stable(feature = "rust1", since = "1.0.0")]
+            impl Hash for $ty {
+                fn hash<H: Hasher>(&self, state: &mut H) {
+                    state.$meth(*self)
+                }
+
+                fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
+                    let newlen = data.len() * mem::size_of::<$ty>();
+                    let ptr = data.as_ptr() as *const u8;
+                    // SAFETY: `ptr` is valid and aligned, as this macro is only used
+                    // for numeric primitives which have no padding. The new slice only
+                    // spans across `data` and is never mutated, and its total size is the
+                    // same as the original `data` so it can't be over `isize::MAX`.
+                    state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
+                }
+            }
+        )*}
+    }
+
+    impl_write! {
+        (u8, write_u8),
+        (u16, write_u16),
+        (u32, write_u32),
+        (u64, write_u64),
+        (usize, write_usize),
+        (i8, write_i8),
+        (i16, write_i16),
+        (i32, write_i32),
+        (i64, write_i64),
+        (isize, write_isize),
+        (u128, write_u128),
+        (i128, write_i128),
+    }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl Hash for bool {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            // `false` hashes as 0u8, `true` as 1u8.
+            state.write_u8(*self as u8)
+        }
+    }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl Hash for char {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            // Hash the Unicode scalar value.
+            state.write_u32(*self as u32)
+        }
+    }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl Hash for str {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            state.write(self.as_bytes());
+            // Trailing 0xff (never a valid UTF-8 byte) acts as a terminator
+            // so that, e.g., ("ab", "c") and ("a", "bc") hash differently
+            // when hashed in sequence.
+            state.write_u8(0xff)
+        }
+    }
+
+    #[stable(feature = "never_hash", since = "1.29.0")]
+    impl Hash for ! {
+        fn hash<H: Hasher>(&self, _: &mut H) {
+            // A value of type `!` cannot exist, so this body is unreachable;
+            // dereferencing `self` satisfies the return type vacuously.
+            *self
+        }
+    }
+
+    // Implements `Hash` for tuples by hashing each element in order.
+    // The last element may be unsized (`?Sized`), mirroring tuple layout rules.
+    macro_rules! impl_hash_tuple {
+        () => (
+            #[stable(feature = "rust1", since = "1.0.0")]
+            impl Hash for () {
+                fn hash<H: Hasher>(&self, _state: &mut H) {}
+            }
+        );
+
+        ( $($name:ident)+) => (
+            #[stable(feature = "rust1", since = "1.0.0")]
+            impl<$($name: Hash),+> Hash for ($($name,)+) where last_type!($($name,)+): ?Sized {
+                #[allow(non_snake_case)]
+                fn hash<S: Hasher>(&self, state: &mut S) {
+                    let ($(ref $name,)+) = *self;
+                    $($name.hash(state);)+
+                }
+            }
+        );
+    }
+
+    // Extracts the last identifier from a comma-separated list.
+    macro_rules! last_type {
+        ($a:ident,) => { $a };
+        ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+    }
+
+    impl_hash_tuple! {}
+    impl_hash_tuple! { A }
+    impl_hash_tuple! { A B }
+    impl_hash_tuple! { A B C }
+    impl_hash_tuple! { A B C D }
+    impl_hash_tuple! { A B C D E }
+    impl_hash_tuple! { A B C D E F }
+    impl_hash_tuple! { A B C D E F G }
+    impl_hash_tuple! { A B C D E F G H }
+    impl_hash_tuple! { A B C D E F G H I }
+    impl_hash_tuple! { A B C D E F G H I J }
+    impl_hash_tuple! { A B C D E F G H I J K }
+    impl_hash_tuple! { A B C D E F G H I J K L }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl<T: Hash> Hash for [T] {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            // Length prefix distinguishes slices that would otherwise
+            // concatenate to the same element stream.
+            self.len().hash(state);
+            Hash::hash_slice(self, state)
+        }
+    }
+
+    // References hash as the value they point to, not as an address.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl<T: ?Sized + Hash> Hash for &T {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            (**self).hash(state);
+        }
+    }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl<T: ?Sized + Hash> Hash for &mut T {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            (**self).hash(state);
+        }
+    }
+
+    // Raw pointers, by contrast, hash as addresses (plus metadata for fat
+    // pointers); the pointee is never read.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl<T: ?Sized> Hash for *const T {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+                // Thin pointer
+                state.write_usize(*self as *const () as usize);
+            } else {
+                // Fat pointer
+                // SAFETY: we are accessing the memory occupied by `self`
+                // which is guaranteed to be valid.
+                // This assumes a fat pointer can be represented by a `(usize, usize)`,
+                // which is safe to do in `std` because it is shipped and kept in sync
+                // with the implementation of fat pointers in `rustc`.
+                let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) };
+                state.write_usize(a);
+                state.write_usize(b);
+            }
+        }
+    }
+
+    #[stable(feature = "rust1", since = "1.0.0")]
+    impl<T: ?Sized> Hash for *mut T {
+        fn hash<H: Hasher>(&self, state: &mut H) {
+            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+                // Thin pointer
+                state.write_usize(*self as *const () as usize);
+            } else {
+                // Fat pointer
+                // SAFETY: we are accessing the memory occupied by `self`
+                // which is guaranteed to be valid.
+                // This assumes a fat pointer can be represented by a `(usize, usize)`,
+                // which is safe to do in `std` because it is shipped and kept in sync
+                // with the implementation of fat pointers in `rustc`.
+                let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) };
+                state.write_usize(a);
+                state.write_usize(b);
+            }
+        }
+    }
+}
--- /dev/null
+//! An implementation of SipHash.
+
+#![allow(deprecated)] // the types in this module are deprecated
+
+use crate::cmp;
+use crate::marker::PhantomData;
+use crate::mem;
+use crate::ptr;
+
+/// An implementation of SipHash 1-3.
+///
+/// This is currently the default hashing function used by standard library
+/// (e.g., `collections::HashMap` uses it by default).
+///
+/// See: <https://131002.net/siphash>
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[rustc_deprecated(
+    since = "1.13.0",
+    reason = "use `std::collections::hash_map::DefaultHasher` instead"
+)]
+#[derive(Debug, Clone, Default)]
+#[doc(hidden)]
+pub struct SipHasher13 {
+    // Core state parameterized on `Sip13Rounds` (1 compression, 3 finalization).
+    hasher: Hasher<Sip13Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://131002.net/siphash/>
+#[unstable(feature = "hashmap_internals", issue = "none")]
+#[rustc_deprecated(
+    since = "1.13.0",
+    reason = "use `std::collections::hash_map::DefaultHasher` instead"
+)]
+#[derive(Debug, Clone, Default)]
+// Private: only used as the inner state of the public `SipHasher` wrapper.
+struct SipHasher24 {
+    hasher: Hasher<Sip24Rounds>,
+}
+
+/// An implementation of SipHash 2-4.
+///
+/// See: <https://131002.net/siphash/>
+///
+/// SipHash is a general-purpose hashing function: it runs at a good
+/// speed (competitive with Spooky and City) and permits strong _keyed_
+/// hashing. This lets you key your hash tables from a strong RNG, such as
+/// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html).
+///
+/// Although the SipHash algorithm is considered to be generally strong,
+/// it is not intended for cryptographic purposes. As such, all
+/// cryptographic uses of this implementation are _strongly discouraged_.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(
+    since = "1.13.0",
+    reason = "use `std::collections::hash_map::DefaultHasher` instead"
+)]
+#[derive(Debug, Clone, Default)]
+pub struct SipHasher(SipHasher24);
+
+// Shared SipHash state; the round count is selected by the `S: Sip`
+// type parameter (`Sip13Rounds` or `Sip24Rounds`).
+#[derive(Debug)]
+struct Hasher<S: Sip> {
+    k0: u64,               // first half of the 128-bit key
+    k1: u64,               // second half of the 128-bit key
+    length: usize,         // how many bytes we've processed
+    state: State,          // hash State
+    tail: u64,             // unprocessed bytes, packed little-endian
+    ntail: usize,          // how many bytes in tail are valid
+    _marker: PhantomData<S>,
+}
+
+#[derive(Debug, Clone, Copy)]
+#[repr(C)]
+struct State {
+    // v0, v2 and v1, v3 show up in pairs in the algorithm,
+    // and simd implementations of SipHash will use vectors
+    // of v02 and v13. By placing them in this order in the struct,
+    // the compiler can pick up on just a few simd optimizations by itself.
+    v0: u64,
+    v2: u64,
+    v1: u64,
+    v3: u64,
+}
+
+// One SipRound: the ARX (add-rotate-xor) compression step applied to the
+// four state words. Written as a macro (not a function) so it can operate
+// on either a `State` or four loose expressions.
+macro_rules! compress {
+    ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
+    ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+        $v0 = $v0.wrapping_add($v1);
+        $v1 = $v1.rotate_left(13);
+        $v1 ^= $v0;
+        $v0 = $v0.rotate_left(32);
+        $v2 = $v2.wrapping_add($v3);
+        $v3 = $v3.rotate_left(16);
+        $v3 ^= $v2;
+        $v0 = $v0.wrapping_add($v3);
+        $v3 = $v3.rotate_left(21);
+        $v3 ^= $v0;
+        $v2 = $v2.wrapping_add($v1);
+        $v1 = $v1.rotate_left(17);
+        $v1 ^= $v2;
+        $v2 = $v2.rotate_left(32);
+    }};
+}
+
+/// Loads an integer of the desired type from a byte stream, in LE order. Uses
+/// `copy_nonoverlapping` to let the compiler generate the most efficient way
+/// to load it from a possibly unaligned address.
+///
+/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
+macro_rules! load_int_le {
+    ($buf:expr, $i:expr, $int_ty:ident) => {{
+        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+        let mut data = 0 as $int_ty;
+        // Byte-wise copy avoids any alignment requirement on `$buf + $i`.
+        ptr::copy_nonoverlapping(
+            $buf.as_ptr().add($i),
+            &mut data as *mut _ as *mut u8,
+            mem::size_of::<$int_ty>(),
+        );
+        // `to_le` makes the result independent of host endianness.
+        data.to_le()
+    }};
+}
+
+/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
+/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
+/// sizes and avoid calling `memcpy`, which is good for speed.
+///
+/// Unsafe because: unchecked indexing at start..start+len
+#[inline]
+unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
+    debug_assert!(len < 8);
+    let mut i = 0; // current byte index (from LSB) in the output u64
+    let mut out = 0;
+    // Decompose `len` (0..=7) into at most one 4-byte, one 2-byte, and one
+    // 1-byte load, assembling the result little-endian from the LSB up.
+    if i + 3 < len {
+        // SAFETY: `i` cannot be greater than `len`, and the caller must guarantee
+        // that the index start..start+len is in bounds.
+        out = unsafe { load_int_le!(buf, start + i, u32) } as u64;
+        i += 4;
+    }
+    if i + 1 < len {
+        // SAFETY: same as above.
+        out |= (unsafe { load_int_le!(buf, start + i, u16) } as u64) << (i * 8);
+        i += 2
+    }
+    if i < len {
+        // SAFETY: same as above.
+        out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
+        i += 1;
+    }
+    // All `len` bytes must have been consumed exactly.
+    debug_assert_eq!(i, len);
+    out
+}
+
+impl SipHasher {
+    /// Creates a new `SipHasher` with the two initial keys set to 0.
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_deprecated(
+        since = "1.13.0",
+        reason = "use `std::collections::hash_map::DefaultHasher` instead"
+    )]
+    pub fn new() -> SipHasher {
+        SipHasher::new_with_keys(0, 0)
+    }
+
+    /// Creates a `SipHasher` that is keyed off the provided keys.
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_deprecated(
+        since = "1.13.0",
+        reason = "use `std::collections::hash_map::DefaultHasher` instead"
+    )]
+    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
+        SipHasher(SipHasher24 { hasher: Hasher::new_with_keys(key0, key1) })
+    }
+}
+
+impl SipHasher13 {
+    /// Creates a new `SipHasher13` with the two initial keys set to 0.
+    #[inline]
+    #[unstable(feature = "hashmap_internals", issue = "none")]
+    #[rustc_deprecated(
+        since = "1.13.0",
+        reason = "use `std::collections::hash_map::DefaultHasher` instead"
+    )]
+    pub fn new() -> SipHasher13 {
+        SipHasher13::new_with_keys(0, 0)
+    }
+
+    /// Creates a `SipHasher13` that is keyed off the provided keys.
+    #[inline]
+    #[unstable(feature = "hashmap_internals", issue = "none")]
+    #[rustc_deprecated(
+        since = "1.13.0",
+        reason = "use `std::collections::hash_map::DefaultHasher` instead"
+    )]
+    pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
+        SipHasher13 { hasher: Hasher::new_with_keys(key0, key1) }
+    }
+}
+
+impl<S: Sip> Hasher<S> {
+    // Builds a hasher keyed with (`key0`, `key1`) and initializes the state
+    // via `reset`.
+    #[inline]
+    fn new_with_keys(key0: u64, key1: u64) -> Hasher<S> {
+        let mut state = Hasher {
+            k0: key0,
+            k1: key1,
+            length: 0,
+            state: State { v0: 0, v1: 0, v2: 0, v3: 0 },
+            tail: 0,
+            ntail: 0,
+            _marker: PhantomData,
+        };
+        state.reset();
+        state
+    }
+
+    // Reinitializes the four state words from the keys. The constants are
+    // the SipHash initialization constants (ASCII of
+    // "somepseudorandomlygeneratedbytes" split into four 8-byte words).
+    #[inline]
+    fn reset(&mut self) {
+        self.length = 0;
+        self.state.v0 = self.k0 ^ 0x736f6d6570736575;
+        self.state.v1 = self.k1 ^ 0x646f72616e646f6d;
+        self.state.v2 = self.k0 ^ 0x6c7967656e657261;
+        self.state.v3 = self.k1 ^ 0x7465646279746573;
+        self.ntail = 0;
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl super::Hasher for SipHasher {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.0.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0.hasher.finish()
+ }
+}
+
+#[unstable(feature = "hashmap_internals", issue = "none")]
+impl super::Hasher for SipHasher13 {
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.hasher.write(msg)
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.hasher.finish()
+ }
+}
+
+impl<S: Sip> super::Hasher for Hasher<S> {
+    // Note: no integer hashing methods (`write_u*`, `write_i*`) are defined
+    // for this type. We could add them, copy the `short_write` implementation
+    // in librustc_data_structures/sip128.rs, and add `write_u*`/`write_i*`
+    // methods to `SipHasher`, `SipHasher13`, and `DefaultHasher`. This would
+    // greatly speed up integer hashing by those hashers, at the cost of
+    // slightly slowing down compile speeds on some benchmarks. See #69152 for
+    // details.
+    #[inline]
+    fn write(&mut self, msg: &[u8]) {
+        let length = msg.len();
+        self.length += length;
+
+        let mut needed = 0;
+
+        // Step 1: top up the partially-filled 8-byte tail buffer, if any.
+        if self.ntail != 0 {
+            needed = 8 - self.ntail;
+            // SAFETY: `cmp::min(length, needed)` is guaranteed to not be over `length`
+            self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << (8 * self.ntail);
+            if length < needed {
+                // Still not a full word; just buffer and return.
+                self.ntail += length;
+                return;
+            } else {
+                // Tail is now a full 8-byte word: compress it into the state.
+                self.state.v3 ^= self.tail;
+                S::c_rounds(&mut self.state);
+                self.state.v0 ^= self.tail;
+                self.ntail = 0;
+            }
+        }
+
+        // Buffered tail is now flushed, process new input.
+        let len = length - needed;
+        let left = len & 0x7; // len % 8
+
+        // Step 2: compress the input 8 bytes at a time.
+        let mut i = needed;
+        while i < len - left {
+            // SAFETY: because `len - left` is the biggest multiple of 8 under
+            // `len`, and because `i` starts at `needed` where `len` is `length - needed`,
+            // `i + 8` is guaranteed to be less than or equal to `length`.
+            let mi = unsafe { load_int_le!(msg, i, u64) };
+
+            self.state.v3 ^= mi;
+            S::c_rounds(&mut self.state);
+            self.state.v0 ^= mi;
+
+            i += 8;
+        }
+
+        // Step 3: buffer the remaining 0..=7 bytes for the next write/finish.
+        // SAFETY: `i` is now `needed + len.div_euclid(8) * 8`,
+        // so `i + left` = `needed + len` = `length`, which is by
+        // definition equal to `msg.len()`.
+        self.tail = unsafe { u8to64_le(msg, i, left) };
+        self.ntail = left;
+    }
+
+    #[inline]
+    fn finish(&self) -> u64 {
+        // Work on a copy so `finish` stays non-destructive (`&self`).
+        let mut state = self.state;
+
+        // Final block: total length (mod 256) in the top byte, remaining
+        // tail bytes below it, per the SipHash specification.
+        let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail;
+
+        state.v3 ^= b;
+        S::c_rounds(&mut state);
+        state.v0 ^= b;
+
+        // Finalization: flip a bit of v2, run d-rounds, fold the state.
+        state.v2 ^= 0xff;
+        S::d_rounds(&mut state);
+
+        state.v0 ^ state.v1 ^ state.v2 ^ state.v3
+    }
+}
+
+// Manual impl: `#[derive(Clone)]` would require `S: Clone`, which the
+// `Sip` round markers deliberately don't need.
+impl<S: Sip> Clone for Hasher<S> {
+    #[inline]
+    fn clone(&self) -> Hasher<S> {
+        Hasher {
+            k0: self.k0,
+            k1: self.k1,
+            length: self.length,
+            state: self.state,
+            tail: self.tail,
+            ntail: self.ntail,
+            _marker: self._marker,
+        }
+    }
+}
+
+impl<S: Sip> Default for Hasher<S> {
+    /// Creates a `Hasher<S>` with the two initial keys set to 0.
+    #[inline]
+    fn default() -> Hasher<S> {
+        // Same state as the public `new()` constructors: zero keys.
+        Hasher::new_with_keys(0, 0)
+    }
+}
+
+// Selects the round counts that distinguish SipHash variants:
+// `c_rounds` runs per compressed message block, `d_rounds` at finalization
+// (e.g. SipHash-1-3 vs. SipHash-2-4).
+#[doc(hidden)]
+trait Sip {
+    fn c_rounds(_: &mut State);
+    fn d_rounds(_: &mut State);
+}
+
+// Marker for SipHash-1-3: 1 compression round, 3 finalization rounds.
+#[derive(Debug, Clone, Default)]
+struct Sip13Rounds;
+
+impl Sip for Sip13Rounds {
+    #[inline]
+    fn c_rounds(state: &mut State) {
+        compress!(state);
+    }
+
+    #[inline]
+    fn d_rounds(state: &mut State) {
+        compress!(state);
+        compress!(state);
+        compress!(state);
+    }
+}
+
+// Marker for SipHash-2-4: 2 compression rounds, 4 finalization rounds.
+#[derive(Debug, Clone, Default)]
+struct Sip24Rounds;
+
+impl Sip for Sip24Rounds {
+    #[inline]
+    fn c_rounds(state: &mut State) {
+        compress!(state);
+        compress!(state);
+    }
+
+    #[inline]
+    fn d_rounds(state: &mut State) {
+        compress!(state);
+        compress!(state);
+        compress!(state);
+        compress!(state);
+    }
+}
--- /dev/null
+#![stable(feature = "core_hint", since = "1.27.0")]
+
+//! Hints given to the compiler that affect how code should be emitted or
+//! optimized. Hints may be compile time or runtime.
+
+use crate::intrinsics;
+
+/// Informs the compiler that this point in the code is not reachable, enabling
+/// further optimizations.
+///
+/// # Safety
+///
+/// Reaching this function is completely *undefined behavior* (UB). In
+/// particular, the compiler assumes that all UB must never happen, and
+/// therefore will eliminate all branches that reach to a call to
+/// `unreachable_unchecked()`.
+///
+/// Like all instances of UB, if this assumption turns out to be wrong, i.e., the
+/// `unreachable_unchecked()` call is actually reachable among all possible
+/// control flow, the compiler will apply the wrong optimization strategy, and
+/// may sometimes even corrupt seemingly unrelated code, causing
+/// difficult-to-debug problems.
+///
+/// Use this function only when you can prove that the code will never call it.
+/// Otherwise, consider using the [`unreachable!`] macro, which does not allow
+/// optimizations but will panic when executed.
+///
+/// # Example
+///
+/// ```
+/// fn div_1(a: u32, b: u32) -> u32 {
+///     use std::hint::unreachable_unchecked;
+///
+///     // `b.saturating_add(1)` is always positive (not zero),
+///     // hence `checked_div` will never return `None`.
+///     // Therefore, the else branch is unreachable.
+///     a.checked_div(b.saturating_add(1))
+///         .unwrap_or_else(|| unsafe { unreachable_unchecked() })
+/// }
+///
+/// assert_eq!(div_1(7, 0), 7);
+/// assert_eq!(div_1(9, 1), 4);
+/// assert_eq!(div_1(11, u32::MAX), 0);
+/// ```
+#[inline]
+#[stable(feature = "unreachable", since = "1.27.0")]
+#[rustc_const_unstable(feature = "const_unreachable_unchecked", issue = "53188")]
+pub const unsafe fn unreachable_unchecked() -> ! {
+    // Delegates to the compiler intrinsic; the `!` return type lets the
+    // optimizer treat everything after a call to this as dead code.
+    // SAFETY: the safety contract for `intrinsics::unreachable` must
+    // be upheld by the caller.
+    unsafe { intrinsics::unreachable() }
+}
+
+/// Emits a machine instruction to signal the processor that it is running in
+/// a busy-wait spin-loop ("spin lock").
+///
+/// Upon receiving the spin-loop signal the processor can optimize its behavior by,
+/// for example, saving power or switching hyper-threads.
+///
+/// This function is different from [`thread::yield_now`] which directly
+/// yields to the system's scheduler, whereas `spin_loop` does not interact
+/// with the operating system.
+///
+/// A common use case for `spin_loop` is implementing bounded optimistic
+/// spinning in a CAS loop in synchronization primitives. To avoid problems
+/// like priority inversion, it is strongly recommended that the spin loop is
+/// terminated after a finite amount of iterations and an appropriate blocking
+/// syscall is made.
+///
+/// **Note**: On platforms that do not support receiving spin-loop hints this
+/// function does not do anything at all.
+///
+/// # Examples
+///
+/// ```
+/// use std::sync::atomic::{AtomicBool, Ordering};
+/// use std::sync::Arc;
+/// use std::{hint, thread};
+///
+/// // A shared atomic value that threads will use to coordinate
+/// let live = Arc::new(AtomicBool::new(false));
+///
+/// // In a background thread we'll eventually set the value
+/// let bg_work = {
+/// let live = live.clone();
+/// thread::spawn(move || {
+/// // Do some work, then make the value live
+/// do_some_work();
+/// live.store(true, Ordering::Release);
+/// })
+/// };
+///
+/// // Back on our current thread, we wait for the value to be set
+/// while live.load(Ordering::Acquire) {
+/// // The spin loop is a hint to the CPU that we're waiting, but probably
+/// // not for very long
+/// hint::spin_loop();
+/// }
+///
+/// // The value is now set
+/// # fn do_some_work() {}
+/// do_some_work();
+/// bg_work.join()?;
+/// # Ok::<(), Box<dyn core::any::Any + Send + 'static>>(())
+/// ```
+///
+/// [`thread::yield_now`]: ../../std/thread/fn.yield_now.html
+#[inline]
+#[stable(feature = "renamed_spin_loop", since = "1.49.0")]
+pub fn spin_loop() {
+ #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "sse2"))]
+ {
+ #[cfg(target_arch = "x86")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86 targets.
+ unsafe { crate::arch::x86::_mm_pause() };
+ }
+
+ #[cfg(target_arch = "x86_64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on x86_64 targets.
+ unsafe { crate::arch::x86_64::_mm_pause() };
+ }
+ }
+
+ #[cfg(any(target_arch = "aarch64", all(target_arch = "arm", target_feature = "v6")))]
+ {
+ #[cfg(target_arch = "aarch64")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on aarch64 targets.
+ unsafe { crate::arch::aarch64::__yield() };
+ }
+ #[cfg(target_arch = "arm")]
+ {
+ // SAFETY: the `cfg` attr ensures that we only execute this on arm targets
+ // with support for the v6 feature.
+ unsafe { crate::arch::arm::__yield() };
+ }
+ }
+}
+
/// An identity function that *__hints__* to the compiler to be maximally pessimistic about what
/// `black_box` could do.
///
/// Unlike [`std::convert::identity`], a Rust compiler is encouraged to assume that `black_box` can
/// use `dummy` in any possible valid way that Rust code is allowed to without introducing undefined
/// behavior in the calling code. This property makes `black_box` useful for writing code in which
/// certain optimizations are not desired, such as benchmarks.
///
/// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The
/// extent to which it can block optimisations may vary depending upon the platform and code-gen
/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
///
/// [`std::convert::identity`]: crate::convert::identity
// Under Miri the asm block is skipped entirely, so force `inline(never)` there
// to keep at least a call boundary as the opacity barrier.
#[cfg_attr(not(miri), inline)]
#[cfg_attr(miri, inline(never))]
#[unstable(feature = "test", issue = "50297")]
// `mut` is only needed by the asm operand below, which is cfg'd out under Miri.
#[cfg_attr(miri, allow(unused_mut))]
pub fn black_box<T>(mut dummy: T) -> T {
    // We need to "use" the argument in some way LLVM can't introspect, and on
    // targets that support it we can typically leverage inline assembly to do
    // this. LLVM's interpretation of inline assembly is that it's, well, a black
    // box. This isn't the greatest implementation since it probably deoptimizes
    // more than we want, but it's so far good enough.
    //
    // `llvm_asm!` is the legacy inline-assembly macro; an empty asm template
    // that takes `&mut dummy` as an input and clobbers "memory" is what makes
    // the value opaque to the optimizer.

    #[cfg(not(miri))] // This is just a hint, so it is fine to skip in Miri.
    // SAFETY: the inline assembly is a no-op.
    unsafe {
        // FIXME: Cannot use `asm!` because it doesn't support MIPS and other architectures.
        llvm_asm!("" : : "r"(&mut dummy) : "memory" : "volatile");
    }

    dummy
}
--- /dev/null
// implements the unary operator "op &T"
// based on "op T" where T is expected to be `Copy`able
macro_rules! forward_ref_unop {
    // Arm without an explicit stability attribute: default to the
    // `rust1` attribute and delegate to the arm below.
    (impl $imp:ident, $method:ident for $t:ty) => {
        forward_ref_unop!(impl $imp, $method for $t,
                #[stable(feature = "rust1", since = "1.0.0")]);
    };
    // Arm with an explicit attribute, applied to the generated impl.
    (impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
        #[$attr]
        impl $imp for &$t {
            type Output = <$t as $imp>::Output;

            #[inline]
            fn $method(self) -> <$t as $imp>::Output {
                // Copy out of the reference and forward to the by-value impl.
                $imp::$method(*self)
            }
        }
    }
}
+
// implements binary operators "&T op U", "T op &U", "&T op &U"
// based on "T op U" where T and U are expected to be `Copy`able
macro_rules! forward_ref_binop {
    // Arm without an explicit stability attribute: default to the
    // `rust1` attribute and delegate to the arm below.
    (impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
        forward_ref_binop!(impl $imp, $method for $t, $u,
                #[stable(feature = "rust1", since = "1.0.0")]);
    };
    // Arm with an explicit attribute, applied to all three generated impls.
    (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
        // "&T op U": copy the left-hand side out of the reference.
        #[$attr]
        impl<'a> $imp<$u> for &'a $t {
            type Output = <$t as $imp<$u>>::Output;

            #[inline]
            fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
                $imp::$method(*self, other)
            }
        }

        // "T op &U": copy the right-hand side out of the reference.
        #[$attr]
        impl $imp<&$u> for $t {
            type Output = <$t as $imp<$u>>::Output;

            #[inline]
            fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
                $imp::$method(self, *other)
            }
        }

        // "&T op &U": copy both operands out of their references.
        #[$attr]
        impl $imp<&$u> for &$t {
            type Output = <$t as $imp<$u>>::Output;

            #[inline]
            fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
                $imp::$method(*self, *other)
            }
        }
    }
}
+
// implements "T op= &U", based on "T op= U"
// where U is expected to be `Copy`able
macro_rules! forward_ref_op_assign {
    // Arm without an explicit stability attribute: default to the
    // `op_assign_builtins_by_ref` attribute and delegate to the arm below.
    (impl $imp:ident, $method:ident for $t:ty, $u:ty) => {
        forward_ref_op_assign!(impl $imp, $method for $t, $u,
                #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
    };
    // Arm with an explicit attribute, applied to the generated impl.
    (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
        #[$attr]
        impl $imp<&$u> for $t {
            #[inline]
            fn $method(&mut self, other: &$u) {
                // Copy the right-hand side out of the reference and forward
                // to the by-value `op=` impl.
                $imp::$method(self, *other);
            }
        }
    }
}
+
/// Create a zero-size type similar to a closure type, but named.
///
/// Generates a unit struct plus `Fn`/`FnMut`/`FnOnce` implementations whose
/// call signature and body are given in closure-like syntax, so the struct
/// can be used wherever a nameable function type is needed.
#[unstable(feature = "std_internals", issue = "none")]
macro_rules! impl_fn_for_zst {
    ($(
        $( #[$attr: meta] )*
        struct $Name: ident impl$( <$( $lifetime : lifetime ),+> )? Fn =
            |$( $arg: ident: $ArgTy: ty ),*| -> $ReturnTy: ty
            $body: block;
    )+) => {
        $(
            $( #[$attr] )*
            struct $Name;

            // `Fn` holds the actual body; `FnMut` and `FnOnce` below delegate
            // to it, matching the standard trait hierarchy.
            impl $( <$( $lifetime ),+> )? Fn<($( $ArgTy, )*)> for $Name {
                #[inline]
                extern "rust-call" fn call(&self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
                    $body
                }
            }

            impl $( <$( $lifetime ),+> )? FnMut<($( $ArgTy, )*)> for $Name {
                #[inline]
                extern "rust-call" fn call_mut(
                    &mut self,
                    ($( $arg, )*): ($( $ArgTy, )*)
                ) -> $ReturnTy {
                    // Reborrow `&mut self` as `&self` and forward to `Fn::call`.
                    Fn::call(&*self, ($( $arg, )*))
                }
            }

            impl $( <$( $lifetime ),+> )? FnOnce<($( $ArgTy, )*)> for $Name {
                type Output = $ReturnTy;

                #[inline]
                extern "rust-call" fn call_once(self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
                    // The struct is zero-sized, so consuming `self` is trivial;
                    // borrow it and forward to `Fn::call`.
                    Fn::call(&self, ($( $arg, )*))
                }
            }
        )+
    }
}
--- /dev/null
+//! Compiler intrinsics.
+//!
+//! The corresponding definitions are in `compiler/rustc_codegen_llvm/src/intrinsic.rs`.
+//! The corresponding const implementations are in `compiler/rustc_mir/src/interpret/intrinsics.rs`
+//!
+//! # Const intrinsics
+//!
+//! Note: any changes to the constness of intrinsics should be discussed with the language team.
+//! This includes changes in the stability of the constness.
+//!
+//! In order to make an intrinsic usable at compile-time, one needs to copy the implementation
+//! from <https://github.com/rust-lang/miri/blob/master/src/shims/intrinsics.rs> to
+//! `compiler/rustc_mir/src/interpret/intrinsics.rs` and add a
+//! `#[rustc_const_unstable(feature = "foo", issue = "01234")]` to the intrinsic.
+//!
+//! If an intrinsic is supposed to be used from a `const fn` with a `rustc_const_stable` attribute,
+//! the intrinsic's attribute must be `rustc_const_stable`, too. Such a change should not be done
+//! without T-lang consultation, because it bakes a feature into the language that cannot be
+//! replicated in user code without compiler support.
+//!
+//! # Volatiles
+//!
+//! The volatile intrinsics provide operations intended to act on I/O
+//! memory, which are guaranteed to not be reordered by the compiler
+//! across other volatile intrinsics. See the LLVM documentation on
+//! [[volatile]].
+//!
+//! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
+//!
+//! # Atomics
+//!
+//! The atomic intrinsics provide common atomic operations on machine
+//! words, with multiple possible memory orderings. They obey the same
+//! semantics as C++11. See the LLVM documentation on [[atomics]].
+//!
+//! [atomics]: http://llvm.org/docs/Atomics.html
+//!
+//! A quick refresher on memory ordering:
+//!
+//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
+//! take place after the barrier.
+//! * Release - a barrier for releasing a lock. Preceding reads and writes
+//! take place before the barrier.
+//! * Sequentially consistent - sequentially consistent operations are
+//! guaranteed to happen in order. This is the standard mode for working
+//! with atomic types and is equivalent to Java's `volatile`.
+
+#![unstable(
+ feature = "core_intrinsics",
+ reason = "intrinsics are unlikely to ever be stabilized, instead \
+ they should be used through stabilized interfaces \
+ in the rest of the standard library",
+ issue = "none"
+)]
+#![allow(missing_docs)]
+
+use crate::marker::DiscriminantKind;
+use crate::mem;
+
+// These imports are used for simplifying intra-doc links
+#[allow(unused_imports)]
+#[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
+use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};
+
+#[stable(feature = "drop_in_place", since = "1.8.0")]
+#[rustc_deprecated(
+ reason = "no longer an intrinsic - use `ptr::drop_in_place` directly",
+ since = "1.18.0"
+)]
+pub use crate::ptr::drop_in_place;
+
+extern "rust-intrinsic" {
+ // N.B., these intrinsics take raw pointers because they mutate aliased
+ // memory, which is not valid for either `&` or `&mut`.
+
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Acquire`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Release`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Acquire`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Relaxed`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Acquire`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::Acquire`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange` method by passing
+ /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange`].
+ pub fn atomic_cxchg_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Acquire`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Release`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_rel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Acquire`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acqrel<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Relaxed`] as both the `success` and `failure` parameters.
+ /// For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_relaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::SeqCst`] as the `success` and [`Ordering::Acquire`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_failacq<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::Acquire`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acq_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+ /// Stores a value if the current value is the same as the `old` value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `compare_exchange_weak` method by passing
+ /// [`Ordering::AcqRel`] as the `success` and [`Ordering::Relaxed`] as the
+ /// `failure` parameters. For example, [`AtomicBool::compare_exchange_weak`].
+ pub fn atomic_cxchgweak_acqrel_failrelaxed<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);
+
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load<T: Copy>(src: *const T) -> T;
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load_acq<T: Copy>(src: *const T) -> T;
+ /// Loads the current value of the pointer.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `load` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
+ pub fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;
+ pub fn atomic_load_unordered<T: Copy>(src: *const T) -> T;
+
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store<T: Copy>(dst: *mut T, val: T);
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store_rel<T: Copy>(dst: *mut T, val: T);
+ /// Stores the value at the specified memory location.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `store` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::store`].
+ pub fn atomic_store_relaxed<T: Copy>(dst: *mut T, val: T);
+ pub fn atomic_store_unordered<T: Copy>(dst: *mut T, val: T);
+
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Stores the value at the specified memory location, returning the old value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `swap` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::swap`].
+ pub fn atomic_xchg_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Adds to the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_add` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_add`].
+ pub fn atomic_xadd_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Subtract from the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_sub` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicIsize::fetch_sub`].
+ pub fn atomic_xsub_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise and with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_and` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_and`].
+ pub fn atomic_and_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise nand with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`AtomicBool`] type via the `fetch_nand` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_nand`].
+ pub fn atomic_nand_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise or with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_or` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_or`].
+ pub fn atomic_or_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Bitwise xor with the current value, returning the previous value.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] types via the `fetch_xor` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::fetch_xor`].
+ pub fn atomic_xor_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_max` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_max`].
+ pub fn atomic_max_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using a signed comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] signed integer types via the `fetch_min` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicI32::fetch_min`].
+ pub fn atomic_min_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Minimum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_min` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_min`].
+ pub fn atomic_umin_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Acquire`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_acq<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Release`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_rel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::AcqRel`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_acqrel<T: Copy>(dst: *mut T, src: T) -> T;
+ /// Maximum with the current value using an unsigned comparison.
+ ///
+ /// The stabilized version of this intrinsic is available on the
+ /// [`atomic`] unsigned integer types via the `fetch_max` method by passing
+ /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicU32::fetch_max`].
+ pub fn atomic_umax_relaxed<T: Copy>(dst: *mut T, src: T) -> T;
+
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_read_data<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_write_data<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
+ /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
+ /// if supported; otherwise, it is a no-op.
+ /// Prefetches have no effect on the behavior of the program but can change its performance
+ /// characteristics.
+ ///
+ /// The `locality` argument must be a constant integer and is a temporal locality specifier
+ /// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);
+}
+
+extern "rust-intrinsic" {
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::SeqCst`]
+ /// as the `order`.
+ pub fn atomic_fence();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::Acquire`]
+ /// as the `order`.
+ pub fn atomic_fence_acq();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::Release`]
+ /// as the `order`.
+ pub fn atomic_fence_rel();
+ /// An atomic fence.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::fence`] by passing [`Ordering::AcqRel`]
+ /// as the `order`.
+ pub fn atomic_fence_acqrel();
+
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::SeqCst`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::Acquire`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_acq();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::Release`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_rel();
+ /// A compiler-only memory barrier.
+ ///
+ /// Memory accesses will never be reordered across this barrier by the
+ /// compiler, but no instructions will be emitted for it. This is
+ /// appropriate for operations on the same thread that may be preempted,
+ /// such as when interacting with signal handlers.
+ ///
+ /// The stabilized version of this intrinsic is available in
+ /// [`atomic::compiler_fence`] by passing [`Ordering::AcqRel`]
+ /// as the `order`.
+ pub fn atomic_singlethreadfence_acqrel();
+
+ /// Magic intrinsic that derives its meaning from attributes
+ /// attached to the function.
+ ///
+ /// For example, dataflow uses this to inject static assertions so
+ /// that `rustc_peek(potentially_uninitialized)` would actually
+ /// double-check that dataflow did indeed compute that it is
+ /// uninitialized at that point in the control flow.
+ ///
+ /// This intrinsic should not be used outside of the compiler.
+ pub fn rustc_peek<T>(_: T) -> T;
+
+ /// Aborts the execution of the process.
+ ///
+ /// A more user-friendly and stable version of this operation is
+ /// [`std::process::abort`](../../std/process/fn.abort.html).
+ pub fn abort() -> !;
+
+ /// Tells LLVM that this point in the code is not reachable, enabling
+ /// further optimizations.
+ ///
+ /// N.B., this is very different from the `unreachable!()` macro: Unlike the
+ /// macro, which panics when it is executed, it is *undefined behavior* to
+ /// reach code marked with this function.
+ ///
+ /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`](crate::hint::unreachable_unchecked).
+ #[rustc_const_unstable(feature = "const_unreachable_unchecked", issue = "53188")]
+ pub fn unreachable() -> !;
+
+ /// Informs the optimizer that a condition is always true.
+ /// If the condition is false, the behavior is undefined.
+ ///
+ /// No code is generated for this intrinsic, but the optimizer will try
+ /// to preserve it (and its condition) between passes, which may interfere
+ /// with optimization of surrounding code and reduce performance. It should
+ /// not be used if the invariant can be discovered by the optimizer on its
+ /// own, or if it does not enable any significant optimizations.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_assume", issue = "76972")]
+ pub fn assume(b: bool);
+
+ /// Hints to the compiler that branch condition is likely to be true.
+ /// Returns the value passed to it.
+ ///
+ /// Any use other than with `if` statements will probably not have an effect.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ pub fn likely(b: bool) -> bool;
+
+ /// Hints to the compiler that branch condition is likely to be false.
+ /// Returns the value passed to it.
+ ///
+ /// Any use other than with `if` statements will probably not have an effect.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_likely", issue = "none")]
+ pub fn unlikely(b: bool) -> bool;
+
+ /// Executes a breakpoint trap, for inspection by a debugger.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn breakpoint();
+
+ /// The size of a type in bytes.
+ ///
+ /// More specifically, this is the offset in bytes between successive
+ /// items of the same type, including alignment padding.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::size_of`](crate::mem::size_of).
+ #[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
+ pub fn size_of<T>() -> usize;
+
+ /// Moves a value to an uninitialized memory location.
+ ///
+ /// Drop glue is not run on the destination.
+ ///
+ /// The stabilized version of this intrinsic is [`core::ptr::write`](crate::ptr::write).
+ pub fn move_val_init<T>(dst: *mut T, src: T);
+
+ /// The minimum alignment of a type.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::align_of`](crate::mem::align_of).
+ #[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
+ pub fn min_align_of<T>() -> usize;
+ /// The preferred alignment of a type.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_pref_align_of", issue = "none")]
+ pub fn pref_align_of<T>() -> usize;
+
+ /// The size of the referenced value in bytes.
+ ///
+ /// The stabilized version of this intrinsic is [`mem::size_of_val`].
+ #[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
+ pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
+ /// The required alignment of the referenced value.
+ ///
+ /// The stabilized version of this intrinsic is [`core::mem::align_of_val`](crate::mem::align_of_val).
+ #[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
+ pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;
+
+ /// Gets a static string slice containing the name of a type.
+ ///
+ /// The stabilized version of this intrinsic is [`core::any::type_name`](crate::any::type_name).
+ #[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
+ pub fn type_name<T: ?Sized>() -> &'static str;
+
+ /// Gets an identifier which is globally unique to the specified type. This
+ /// function will return the same value for a type regardless of whichever
+ /// crate it is invoked in.
+ ///
+ /// The stabilized version of this intrinsic is [`core::any::TypeId::of`](crate::any::TypeId::of).
+ #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+ pub fn type_id<T: ?Sized + 'static>() -> u64;
+
+ /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
+ /// This will statically either panic, or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn assert_inhabited<T>();
+
+ /// A guard for unsafe functions that cannot ever be executed if `T` does not permit
+ /// zero-initialization: This will statically either panic, or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn assert_zero_valid<T>();
+
+ /// A guard for unsafe functions that cannot ever be executed if `T` has invalid
+ /// bit patterns: This will statically either panic, or do nothing.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn assert_uninit_valid<T>();
+
+ /// Gets a reference to a static `Location` indicating where it was called.
+ ///
+ /// Consider using [`core::panic::Location::caller`](crate::panic::Location::caller) instead.
+ #[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
+ pub fn caller_location() -> &'static crate::panic::Location<'static>;
+
+ /// Moves a value out of scope without running drop glue.
+ ///
+ /// This exists solely for [`mem::forget_unsized`]; normal `forget` uses
+ /// `ManuallyDrop` instead.
+ pub fn forget<T: ?Sized>(_: T);
+
+ /// Reinterprets the bits of a value of one type as another type.
+ ///
+ /// Both types must have the same size. Neither the original, nor the result,
+ /// may be an [invalid value](../../nomicon/what-unsafe-does.html).
+ ///
+ /// `transmute` is semantically equivalent to a bitwise move of one type
+ /// into another. It copies the bits from the source value into the
+ /// destination value, then forgets the original. It's equivalent to C's
+ /// `memcpy` under the hood, just like `transmute_copy`.
+ ///
+ /// `transmute` is **incredibly** unsafe. There are a vast number of ways to
+ /// cause [undefined behavior][ub] with this function. `transmute` should be
+ /// the absolute last resort.
+ ///
+ /// The [nomicon](../../nomicon/transmutes.html) has additional
+ /// documentation.
+ ///
+ /// [ub]: ../../reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// There are a few things that `transmute` is really useful for.
+ ///
+ /// Turning a pointer into a function pointer. This is *not* portable to
+ /// machines where function pointers and data pointers have different sizes.
+ ///
+ /// ```
+ /// fn foo() -> i32 {
+ /// 0
+ /// }
+ /// let pointer = foo as *const ();
+ /// let function = unsafe {
+ /// std::mem::transmute::<*const (), fn() -> i32>(pointer)
+ /// };
+ /// assert_eq!(function(), 0);
+ /// ```
+ ///
+ /// Extending a lifetime, or shortening an invariant lifetime. This is
+ /// advanced, very unsafe Rust!
+ ///
+ /// ```
+ /// struct R<'a>(&'a i32);
+ /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> {
+ /// std::mem::transmute::<R<'b>, R<'static>>(r)
+ /// }
+ ///
+ /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>)
+ /// -> &'b mut R<'c> {
+ /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r)
+ /// }
+ /// ```
+ ///
+ /// # Alternatives
+ ///
+ /// Don't despair: many uses of `transmute` can be achieved through other means.
+ /// Below are common applications of `transmute` which can be replaced with safer
+ /// constructs.
+ ///
+ /// Turning raw bytes (`&[u8]`) to `u32`, `f64`, etc.:
+ ///
+ /// ```
+ /// let raw_bytes = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let num = unsafe {
+ /// std::mem::transmute::<[u8; 4], u32>(raw_bytes)
+ /// };
+ ///
+ /// // use `u32::from_ne_bytes` instead
+ /// let num = u32::from_ne_bytes(raw_bytes);
+ /// // or use `u32::from_le_bytes` or `u32::from_be_bytes` to specify the endianness
+ /// let num = u32::from_le_bytes(raw_bytes);
+ /// assert_eq!(num, 0x12345678);
+ /// let num = u32::from_be_bytes(raw_bytes);
+ /// assert_eq!(num, 0x78563412);
+ /// ```
+ ///
+ /// Turning a pointer into a `usize`:
+ ///
+ /// ```
+ /// let ptr = &0;
+ /// let ptr_num_transmute = unsafe {
+ /// std::mem::transmute::<&i32, usize>(ptr)
+ /// };
+ ///
+ /// // Use an `as` cast instead
+ /// let ptr_num_cast = ptr as *const i32 as usize;
+ /// ```
+ ///
+ /// Turning a `*mut T` into an `&mut T`:
+ ///
+ /// ```
+ /// let ptr: *mut i32 = &mut 0;
+ /// let ref_transmuted = unsafe {
+ /// std::mem::transmute::<*mut i32, &mut i32>(ptr)
+ /// };
+ ///
+ /// // Use a reborrow instead
+ /// let ref_casted = unsafe { &mut *ptr };
+ /// ```
+ ///
+ /// Turning an `&mut T` into an `&mut U`:
+ ///
+ /// ```
+ /// let ptr = &mut 0;
+ /// let val_transmuted = unsafe {
+ /// std::mem::transmute::<&mut i32, &mut u32>(ptr)
+ /// };
+ ///
+ /// // Now, put together `as` and reborrowing - note the chaining of `as`
+ /// // `as` is not transitive
+ /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) };
+ /// ```
+ ///
+ /// Turning an `&str` into an `&[u8]`:
+ ///
+ /// ```
+ /// // this is not a good way to do this.
+ /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") };
+ /// assert_eq!(slice, &[82, 117, 115, 116]);
+ ///
+ /// // You could use `str::as_bytes`
+ /// let slice = "Rust".as_bytes();
+ /// assert_eq!(slice, &[82, 117, 115, 116]);
+ ///
+ /// // Or, just use a byte string, if you have control over the string
+ /// // literal
+ /// assert_eq!(b"Rust", &[82, 117, 115, 116]);
+ /// ```
+ ///
+ /// Turning a `Vec<&T>` into a `Vec<Option<&T>>`:
+ ///
+ /// ```
+ /// let store = [0, 1, 2, 3];
+ /// let v_orig = store.iter().collect::<Vec<&i32>>();
+ ///
+ /// // clone the vector as we will reuse them later
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // Using transmute: this relies on the unspecified data layout of `Vec`, which is a
+ /// // bad idea and could cause Undefined Behavior.
+ /// // However, it is no-copy.
+ /// let v_transmuted = unsafe {
+ /// std::mem::transmute::<Vec<&i32>, Vec<Option<&i32>>>(v_clone)
+ /// };
+ ///
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // This is the suggested, safe way.
+ /// // It does copy the entire vector, though, into a new array.
+ /// let v_collected = v_clone.into_iter()
+ /// .map(Some)
+ /// .collect::<Vec<Option<&i32>>>();
+ ///
+ /// let v_clone = v_orig.clone();
+ ///
+ /// // The no-copy, unsafe way, still using transmute, but not relying on the data layout.
+ /// // Like the first approach, this reuses the `Vec` internals.
+ /// // Therefore, the new inner type must have the
+ /// // exact same size, *and the same alignment*, as the old type.
+ /// // The same caveats exist for this method as transmute, for
+ /// // the original inner type (`&i32`) to the converted inner type
+ /// // (`Option<&i32>`), so read the nomicon pages linked above and also
+ /// // consult the [`from_raw_parts`] documentation.
+ /// let v_from_raw = unsafe {
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Ensure the original vector is not dropped.
+ /// let mut v_clone = std::mem::ManuallyDrop::new(v_clone);
+ /// Vec::from_raw_parts(v_clone.as_mut_ptr() as *mut Option<&i32>,
+ /// v_clone.len(),
+ /// v_clone.capacity())
+ /// };
+ /// ```
+ ///
+ /// [`from_raw_parts`]: ../../std/vec/struct.Vec.html#method.from_raw_parts
+ ///
+ /// Implementing `split_at_mut`:
+ ///
+ /// ```
+ /// use std::{slice, mem};
+ ///
+ /// // There are multiple ways to do this, and there are multiple problems
+ /// // with the following (transmute) way.
+ /// fn split_at_mut_transmute<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice);
+ /// // first: transmute is not type safe; all it checks is that T and
+ /// // U are of the same size. Second, right here, you have two
+ /// // mutable references pointing to the same memory.
+ /// (&mut slice[0..mid], &mut slice2[mid..len])
+ /// }
+ /// }
+ ///
+ /// // This gets rid of the type safety problems; `&mut *` will *only* give
+ /// // you an `&mut T` from an `&mut T` or `*mut T`.
+ /// fn split_at_mut_casts<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let slice2 = &mut *(slice as *mut [T]);
+ /// // however, you still have two mutable references pointing to
+ /// // the same memory.
+ /// (&mut slice[0..mid], &mut slice2[mid..len])
+ /// }
+ /// }
+ ///
+ /// // This is how the standard library does it. This is the best method, if
+ /// // you need to do something like this
+ /// fn split_at_stdlib<T>(slice: &mut [T], mid: usize)
+ /// -> (&mut [T], &mut [T]) {
+ /// let len = slice.len();
+ /// assert!(mid <= len);
+ /// unsafe {
+ /// let ptr = slice.as_mut_ptr();
+ /// // This now has three mutable references pointing at the same
+ /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1.
+ /// // `slice` is never used after `let ptr = ...`, and so one can
+ /// // treat it as "dead", and therefore, you only have two real
+ /// // mutable slices.
+ /// (slice::from_raw_parts_mut(ptr, mid),
+ /// slice::from_raw_parts_mut(ptr.add(mid), len - mid))
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ // NOTE: While this makes the intrinsic const stable, we have some custom code in const fn
+ // checks that prevent its use within `const fn`.
+ #[rustc_const_stable(feature = "const_transmute", since = "1.46.0")]
+ #[rustc_diagnostic_item = "transmute"]
+ pub fn transmute<T, U>(e: T) -> U;
+
+ /// Returns `true` if the actual type given as `T` requires drop
+ /// glue; returns `false` if the actual type provided for `T`
+ /// implements `Copy`.
+ ///
+ /// If the actual type neither requires drop glue nor implements
+ /// `Copy`, then the return value of this function is unspecified.
+ ///
+ /// The stabilized version of this intrinsic is [`mem::needs_drop`](crate::mem::needs_drop).
+ #[rustc_const_stable(feature = "const_needs_drop", since = "1.40.0")]
+ pub fn needs_drop<T>() -> bool;
+
+ /// Calculates the offset from a pointer.
+ ///
+ /// This is implemented as an intrinsic to avoid converting to and from an
+ /// integer, since the conversion would throw away aliasing information.
+ ///
+ /// # Safety
+ ///
+ /// Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of an allocated object. If either pointer is out of
+ /// bounds or arithmetic overflow occurs then any further use of the
+ /// returned value will result in undefined behavior.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::pointer::offset`](../../std/primitive.pointer.html#method.offset).
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
+
+ /// Calculates the offset from a pointer, potentially wrapping.
+ ///
+ /// This is implemented as an intrinsic to avoid converting to and from an
+ /// integer, since the conversion inhibits certain optimizations.
+ ///
+ /// # Safety
+ ///
+ /// Unlike the `offset` intrinsic, this intrinsic does not restrict the
+ /// resulting pointer to point into or one byte past the end of an allocated
+ /// object, and it wraps with two's complement arithmetic. The resulting
+ /// value is not necessarily valid to be used to actually access memory.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::pointer::wrapping_offset`](../../std/primitive.pointer.html#method.wrapping_offset).
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
+
+ /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
+ /// a size of `count` * `size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: usize);
+ /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
+ /// a size of `count` * `size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: usize);
+ /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
+ /// size of `count` * `size_of::<T>()` and an alignment of
+ /// `min_align_of::<T>()`.
+ ///
+ /// The volatile parameter is set to `true`, so it will not be optimized out
+ /// unless size is equal to zero.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: usize);
+
+    /// Performs a volatile load from the `src` pointer.
+    ///
+    /// The stabilized version of this intrinsic is [`core::ptr::read_volatile`](crate::ptr::read_volatile).
+    pub fn volatile_load<T>(src: *const T) -> T;
+    /// Performs a volatile store to the `dst` pointer.
+    ///
+    /// The stabilized version of this intrinsic is [`core::ptr::write_volatile`](crate::ptr::write_volatile).
+    pub fn volatile_store<T>(dst: *mut T, val: T);
+
+    /// Performs a volatile load from the `src` pointer.
+    /// The pointer is not required to be aligned.
+    ///
+    /// This intrinsic does not have a stable counterpart.
+    pub fn unaligned_volatile_load<T>(src: *const T) -> T;
+    /// Performs a volatile store to the `dst` pointer.
+    /// The pointer is not required to be aligned.
+    ///
+    /// This intrinsic does not have a stable counterpart.
+    pub fn unaligned_volatile_store<T>(dst: *mut T, val: T);
+
+ /// Returns the square root of an `f32`
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::sqrt`](../../std/primitive.f32.html#method.sqrt)
+ pub fn sqrtf32(x: f32) -> f32;
+ /// Returns the square root of an `f64`
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::sqrt`](../../std/primitive.f64.html#method.sqrt)
+ pub fn sqrtf64(x: f64) -> f64;
+
+ /// Raises an `f32` to an integer power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::powi`](../../std/primitive.f32.html#method.powi)
+ pub fn powif32(a: f32, x: i32) -> f32;
+ /// Raises an `f64` to an integer power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::powi`](../../std/primitive.f64.html#method.powi)
+ pub fn powif64(a: f64, x: i32) -> f64;
+
+ /// Returns the sine of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::sin`](../../std/primitive.f32.html#method.sin)
+ pub fn sinf32(x: f32) -> f32;
+ /// Returns the sine of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::sin`](../../std/primitive.f64.html#method.sin)
+ pub fn sinf64(x: f64) -> f64;
+
+ /// Returns the cosine of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::cos`](../../std/primitive.f32.html#method.cos)
+ pub fn cosf32(x: f32) -> f32;
+ /// Returns the cosine of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::cos`](../../std/primitive.f64.html#method.cos)
+ pub fn cosf64(x: f64) -> f64;
+
+ /// Raises an `f32` to an `f32` power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::powf`](../../std/primitive.f32.html#method.powf)
+ pub fn powf32(a: f32, x: f32) -> f32;
+ /// Raises an `f64` to an `f64` power.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::powf`](../../std/primitive.f64.html#method.powf)
+ pub fn powf64(a: f64, x: f64) -> f64;
+
+ /// Returns the exponential of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::exp`](../../std/primitive.f32.html#method.exp)
+ pub fn expf32(x: f32) -> f32;
+ /// Returns the exponential of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::exp`](../../std/primitive.f64.html#method.exp)
+ pub fn expf64(x: f64) -> f64;
+
+ /// Returns 2 raised to the power of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::exp2`](../../std/primitive.f32.html#method.exp2)
+ pub fn exp2f32(x: f32) -> f32;
+ /// Returns 2 raised to the power of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::exp2`](../../std/primitive.f64.html#method.exp2)
+ pub fn exp2f64(x: f64) -> f64;
+
+ /// Returns the natural logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::ln`](../../std/primitive.f32.html#method.ln)
+ pub fn logf32(x: f32) -> f32;
+ /// Returns the natural logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::ln`](../../std/primitive.f64.html#method.ln)
+ pub fn logf64(x: f64) -> f64;
+
+ /// Returns the base 10 logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::log10`](../../std/primitive.f32.html#method.log10)
+ pub fn log10f32(x: f32) -> f32;
+ /// Returns the base 10 logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::log10`](../../std/primitive.f64.html#method.log10)
+ pub fn log10f64(x: f64) -> f64;
+
+ /// Returns the base 2 logarithm of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::log2`](../../std/primitive.f32.html#method.log2)
+ pub fn log2f32(x: f32) -> f32;
+ /// Returns the base 2 logarithm of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::log2`](../../std/primitive.f64.html#method.log2)
+ pub fn log2f64(x: f64) -> f64;
+
+ /// Returns `a * b + c` for `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::mul_add`](../../std/primitive.f32.html#method.mul_add)
+ pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
+ /// Returns `a * b + c` for `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::mul_add`](../../std/primitive.f64.html#method.mul_add)
+ pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
+
+ /// Returns the absolute value of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::abs`](../../std/primitive.f32.html#method.abs)
+ pub fn fabsf32(x: f32) -> f32;
+ /// Returns the absolute value of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::abs`](../../std/primitive.f64.html#method.abs)
+ pub fn fabsf64(x: f64) -> f64;
+
+ /// Returns the minimum of two `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::min`]
+ pub fn minnumf32(x: f32, y: f32) -> f32;
+ /// Returns the minimum of two `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::min`]
+ pub fn minnumf64(x: f64, y: f64) -> f64;
+ /// Returns the maximum of two `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f32::max`]
+ pub fn maxnumf32(x: f32, y: f32) -> f32;
+ /// Returns the maximum of two `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`f64::max`]
+ pub fn maxnumf64(x: f64, y: f64) -> f64;
+
+ /// Copies the sign from `y` to `x` for `f32` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::copysign`](../../std/primitive.f32.html#method.copysign)
+ pub fn copysignf32(x: f32, y: f32) -> f32;
+ /// Copies the sign from `y` to `x` for `f64` values.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::copysign`](../../std/primitive.f64.html#method.copysign)
+ pub fn copysignf64(x: f64, y: f64) -> f64;
+
+ /// Returns the largest integer less than or equal to an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::floor`](../../std/primitive.f32.html#method.floor)
+ pub fn floorf32(x: f32) -> f32;
+ /// Returns the largest integer less than or equal to an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::floor`](../../std/primitive.f64.html#method.floor)
+ pub fn floorf64(x: f64) -> f64;
+
+ /// Returns the smallest integer greater than or equal to an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::ceil`](../../std/primitive.f32.html#method.ceil)
+ pub fn ceilf32(x: f32) -> f32;
+ /// Returns the smallest integer greater than or equal to an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::ceil`](../../std/primitive.f64.html#method.ceil)
+ pub fn ceilf64(x: f64) -> f64;
+
+ /// Returns the integer part of an `f32`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::trunc`](../../std/primitive.f32.html#method.trunc)
+ pub fn truncf32(x: f32) -> f32;
+ /// Returns the integer part of an `f64`.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::trunc`](../../std/primitive.f64.html#method.trunc)
+ pub fn truncf64(x: f64) -> f64;
+
+ /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
+ /// if the argument is not an integer.
+ pub fn rintf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
+ /// if the argument is not an integer.
+ pub fn rintf64(x: f64) -> f64;
+
+    /// Returns the nearest integer to an `f32`.
+    ///
+    /// Unlike [`rintf32`], this is not expected to raise an inexact
+    /// floating-point exception (following LLVM's `llvm.nearbyint.*`
+    /// semantics).
+    ///
+    /// This intrinsic does not have a stable counterpart.
+    pub fn nearbyintf32(x: f32) -> f32;
+    /// Returns the nearest integer to an `f64`.
+    ///
+    /// Unlike [`rintf64`], this is not expected to raise an inexact
+    /// floating-point exception (following LLVM's `llvm.nearbyint.*`
+    /// semantics).
+    ///
+    /// This intrinsic does not have a stable counterpart.
+    pub fn nearbyintf64(x: f64) -> f64;
+
+ /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f32::round`](../../std/primitive.f32.html#method.round)
+ pub fn roundf32(x: f32) -> f32;
+ /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
+ ///
+ /// The stabilized version of this intrinsic is
+ /// [`std::f64::round`](../../std/primitive.f64.html#method.round)
+ pub fn roundf64(x: f64) -> f64;
+
+ /// Float addition that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fadd_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float subtraction that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fsub_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float multiplication that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fmul_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float division that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn fdiv_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Float remainder that allows optimizations based on algebraic rules.
+ /// May assume inputs are finite.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn frem_fast<T: Copy>(a: T, b: T) -> T;
+
+ /// Convert with LLVM’s fptoui/fptosi, which may return undef for values out of range
+ /// (<https://github.com/rust-lang/rust/issues/10184>)
+ ///
+ /// Stabilized as [`f32::to_int_unchecked`] and [`f64::to_int_unchecked`].
+ pub fn float_to_int_unchecked<Float: Copy, Int: Copy>(value: Float) -> Int;
+
+ /// Returns the number of bits set in an integer type `T`
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `count_ones` method. For example,
+ /// [`u32::count_ones`]
+ #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")]
+ pub fn ctpop<T: Copy>(x: T) -> T;
+
+ /// Returns the number of leading unset bits (zeroes) in an integer type `T`.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `leading_zeros` method. For example,
+ /// [`u32::leading_zeros`]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz;
+ ///
+ /// let x = 0b0001_1100_u8;
+ /// let num_leading = ctlz(x);
+ /// assert_eq!(num_leading, 3);
+ /// ```
+ ///
+ /// An `x` with value `0` will return the bit width of `T`.
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz;
+ ///
+ /// let x = 0u16;
+ /// let num_leading = ctlz(x);
+ /// assert_eq!(num_leading, 16);
+ /// ```
+ #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")]
+ pub fn ctlz<T: Copy>(x: T) -> T;
+
+ /// Like `ctlz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::ctlz_nonzero;
+ ///
+ /// let x = 0b0001_1100_u8;
+ /// let num_leading = unsafe { ctlz_nonzero(x) };
+ /// assert_eq!(num_leading, 3);
+ /// ```
+ #[rustc_const_unstable(feature = "constctlz", issue = "none")]
+ pub fn ctlz_nonzero<T: Copy>(x: T) -> T;
+
+ /// Returns the number of trailing unset bits (zeroes) in an integer type `T`.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `trailing_zeros` method. For example,
+ /// [`u32::trailing_zeros`]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz;
+ ///
+ /// let x = 0b0011_1000_u8;
+ /// let num_trailing = cttz(x);
+ /// assert_eq!(num_trailing, 3);
+ /// ```
+ ///
+ /// An `x` with value `0` will return the bit width of `T`:
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz;
+ ///
+ /// let x = 0u16;
+ /// let num_trailing = cttz(x);
+ /// assert_eq!(num_trailing, 16);
+ /// ```
+ #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")]
+ pub fn cttz<T: Copy>(x: T) -> T;
+
+ /// Like `cttz`, but extra-unsafe as it returns `undef` when
+ /// given an `x` with value `0`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(core_intrinsics)]
+ ///
+ /// use std::intrinsics::cttz_nonzero;
+ ///
+ /// let x = 0b0011_1000_u8;
+ /// let num_trailing = unsafe { cttz_nonzero(x) };
+ /// assert_eq!(num_trailing, 3);
+ /// ```
+ #[rustc_const_unstable(feature = "const_cttz", issue = "none")]
+ pub fn cttz_nonzero<T: Copy>(x: T) -> T;
+
+ /// Reverses the bytes in an integer type `T`.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `swap_bytes` method. For example,
+ /// [`u32::swap_bytes`]
+ #[rustc_const_stable(feature = "const_bswap", since = "1.40.0")]
+ pub fn bswap<T: Copy>(x: T) -> T;
+
+ /// Reverses the bits in an integer type `T`.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `reverse_bits` method. For example,
+ /// [`u32::reverse_bits`]
+ #[rustc_const_stable(feature = "const_bitreverse", since = "1.40.0")]
+ pub fn bitreverse<T: Copy>(x: T) -> T;
+
+ /// Performs checked integer addition.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_add` method. For example,
+ /// [`u32::overflowing_add`]
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn add_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs checked integer subtraction
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_sub` method. For example,
+ /// [`u32::overflowing_sub`]
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn sub_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs checked integer multiplication
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `overflowing_mul` method. For example,
+ /// [`u32::overflowing_mul`]
+ #[rustc_const_stable(feature = "const_int_overflow", since = "1.40.0")]
+ pub fn mul_with_overflow<T: Copy>(x: T, y: T) -> (T, bool);
+
+ /// Performs an exact division, resulting in undefined behavior where
+ /// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ pub fn exact_div<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs an unchecked division, resulting in undefined behavior
+ /// where y = 0 or x = `T::MIN` and y = -1
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_div` method. For example,
+ /// [`u32::checked_div`]
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_div<T: Copy>(x: T, y: T) -> T;
+ /// Returns the remainder of an unchecked division, resulting in
+ /// undefined behavior where y = 0 or x = `T::MIN` and y = -1
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_rem` method. For example,
+ /// [`u32::checked_rem`]
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_rem<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs an unchecked left shift, resulting in undefined behavior when
+ /// y < 0 or y >= N, where N is the width of T in bits.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_shl` method. For example,
+ /// [`u32::checked_shl`]
+ #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ pub fn unchecked_shl<T: Copy>(x: T, y: T) -> T;
+ /// Performs an unchecked right shift, resulting in undefined behavior when
+ /// y < 0 or y >= N, where N is the width of T in bits.
+ ///
+ /// Safe wrappers for this intrinsic are available on the integer
+ /// primitives via the `checked_shr` method. For example,
+ /// [`u32::checked_shr`]
+ #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")]
+ pub fn unchecked_shr<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked addition, resulting in
+ /// undefined behavior when `x + y > T::MAX` or `x + y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_add<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked subtraction, resulting in
+ /// undefined behavior when `x - y > T::MAX` or `x - y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_sub<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns the result of an unchecked multiplication, resulting in
+ /// undefined behavior when `x * y > T::MAX` or `x * y < T::MIN`.
+ ///
+ /// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_int_unchecked_arith", issue = "none")]
+ pub fn unchecked_mul<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs rotate left.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `rotate_left` method. For example,
+ /// [`u32::rotate_left`]
+ #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ pub fn rotate_left<T: Copy>(x: T, y: T) -> T;
+
+ /// Performs rotate right.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `rotate_right` method. For example,
+ /// [`u32::rotate_right`]
+ #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")]
+ pub fn rotate_right<T: Copy>(x: T, y: T) -> T;
+
+ /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_add` method. For example,
+ /// [`u32::wrapping_add`]
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_add<T: Copy>(a: T, b: T) -> T;
+ /// Returns (a - b) mod 2<sup>N</sup>, where N is the width of T in bits.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_sub` method. For example,
+ /// [`u32::wrapping_sub`]
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_sub<T: Copy>(a: T, b: T) -> T;
+ /// Returns (a * b) mod 2<sup>N</sup>, where N is the width of T in bits.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `wrapping_mul` method. For example,
+ /// [`u32::wrapping_mul`]
+ #[rustc_const_stable(feature = "const_int_wrapping", since = "1.40.0")]
+ pub fn wrapping_mul<T: Copy>(a: T, b: T) -> T;
+
+ /// Computes `a + b`, while saturating at numeric bounds.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `saturating_add` method. For example,
+ /// [`u32::saturating_add`]
+ #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ pub fn saturating_add<T: Copy>(a: T, b: T) -> T;
+ /// Computes `a - b`, while saturating at numeric bounds.
+ ///
+ /// The stabilized versions of this intrinsic are available on the integer
+ /// primitives via the `saturating_sub` method. For example,
+ /// [`u32::saturating_sub`]
+ #[rustc_const_stable(feature = "const_int_saturating", since = "1.40.0")]
+ pub fn saturating_sub<T: Copy>(a: T, b: T) -> T;
+
+    /// Returns the value of the discriminant for the variant in 'v';
+    /// if `T` has no discriminant, returns 0.
+    ///
+    /// (The return type is `<T as DiscriminantKind>::Discriminant`, as in the
+    /// signature below — it is not converted to a fixed integer type.)
+    ///
+    /// The stabilized version of this intrinsic is [`core::mem::discriminant`](crate::mem::discriminant).
+    #[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+    pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
+
+    /// Returns the number of variants of the type `T` cast to a `usize`;
+    /// if `T` has no variants, returns 0. Uninhabited variants will be counted.
+    ///
+    /// The to-be-stabilized version of this intrinsic is [`mem::variant_count`].
+    #[rustc_const_unstable(feature = "variant_count", issue = "73662")]
+    pub fn variant_count<T>() -> usize;
+
+ /// Rust's "try catch" construct which invokes the function pointer `try_fn`
+ /// with the data pointer `data`.
+ ///
+ /// The third argument is a function called if a panic occurs. This function
+ /// takes the data pointer and a pointer to the target-specific exception
+ /// object that was caught. For more information see the compiler's
+ /// source as well as std's catch implementation.
+ pub fn r#try(try_fn: fn(*mut u8), data: *mut u8, catch_fn: fn(*mut u8, *mut u8)) -> i32;
+
+ /// Emits a `!nontemporal` store according to LLVM (see their docs).
+ /// Probably will never become stable.
+ pub fn nontemporal_store<T>(ptr: *mut T, val: T);
+
+ /// See documentation of `<*const T>::offset_from` for details.
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "41079")]
+ pub fn ptr_offset_from<T>(ptr: *const T, base: *const T) -> isize;
+
+ /// See documentation of `<*const T>::guaranteed_eq` for details.
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ pub fn ptr_guaranteed_eq<T>(ptr: *const T, other: *const T) -> bool;
+
+ /// See documentation of `<*const T>::guaranteed_ne` for details.
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ pub fn ptr_guaranteed_ne<T>(ptr: *const T, other: *const T) -> bool;
+}
+
+// Some functions are defined here because they accidentally got made
+// available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
+// (`transmute` also falls into this category, but it cannot be wrapped due to the
+// check that `T` and `U` have the same size.)
+
+/// Returns `true` iff `ptr` is non-null and its address is a multiple of
+/// `align_of::<T>()` (i.e. it is properly aligned for a `T`).
+pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
+    if ptr.is_null() {
+        return false;
+    }
+    // An address is aligned for `T` exactly when it is a multiple of `T`'s
+    // alignment.
+    (ptr as usize) % mem::align_of::<T>() == 0
+}
+
+/// Returns `true` iff the two regions of `count * size_of::<T>()` bytes
+/// starting at `src` and `dst` are disjoint.
+pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
+    // Byte size of each region. A multiplication overflow means the region
+    // could never fit in the address space, so panicking is acceptable.
+    let size = mem::size_of::<T>().checked_mul(count).unwrap();
+    let (a, b) = (src as usize, dst as usize);
+    let distance = if a < b { b - a } else { a - b };
+    // Two equally-sized regions are disjoint exactly when their start
+    // addresses are at least one full region size apart.
+    distance >= size
+}
+
+/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+/// and destination must *not* overlap.
+///
+/// For regions of memory which might overlap, use [`copy`] instead.
+///
+/// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but
+/// with the argument order swapped.
+///
+/// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * Both `src` and `dst` must be properly aligned.
+///
+/// * The region of memory beginning at `src` with a size of `count *
+/// size_of::<T>()` bytes must *not* overlap with the region of memory
+/// beginning at `dst` with the same size.
+///
+/// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
+/// in the region beginning at `*src` and the region beginning at `*dst` can
+/// [violate memory safety][read-ownership].
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointers must be non-NULL and properly aligned.
+///
+/// [`read`]: crate::ptr::read
+/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Manually implement [`Vec::append`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// /// Moves all the elements of `src` into `dst`, leaving `src` empty.
+/// fn append<T>(dst: &mut Vec<T>, src: &mut Vec<T>) {
+/// let src_len = src.len();
+/// let dst_len = dst.len();
+///
+/// // Ensure that `dst` has enough capacity to hold all of `src`.
+/// dst.reserve(src_len);
+///
+/// unsafe {
+/// // The call to offset is always safe because `Vec` will never
+/// // allocate more than `isize::MAX` bytes.
+/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize);
+/// let src_ptr = src.as_ptr();
+///
+/// // Truncate `src` without dropping its contents. We do this first,
+/// // to avoid problems in case something further down panics.
+/// src.set_len(0);
+///
+/// // The two regions cannot overlap because mutable references do
+/// // not alias, and two different vectors cannot own the same
+/// // memory.
+/// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len);
+///
+/// // Notify `dst` that it now holds the contents of `src`.
+/// dst.set_len(dst_len + src_len);
+/// }
+/// }
+///
+/// let mut a = vec!['r'];
+/// let mut b = vec!['u', 's', 't'];
+///
+/// append(&mut a, &mut b);
+///
+/// assert_eq!(a, &['r', 'u', 's', 't']);
+/// assert!(b.is_empty());
+/// ```
+///
+/// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append
+#[doc(alias = "memcpy")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[inline]
+pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
+    extern "rust-intrinsic" {
+        fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+    }
+
+    // Debug builds only: reject null/misaligned pointers and overlapping
+    // ranges up front. Aborting rather than panicking keeps the codegen
+    // impact of this check small.
+    if cfg!(debug_assertions) {
+        let args_ok = is_aligned_and_not_null(src)
+            && is_aligned_and_not_null(dst)
+            && is_nonoverlapping(src, dst, count);
+        if !args_ok {
+            abort();
+        }
+    }
+
+    // SAFETY: the safety contract for `copy_nonoverlapping` must be
+    // upheld by the caller.
+    unsafe { copy_nonoverlapping(src, dst, count) }
+}
+
+/// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
+/// and destination may overlap.
+///
+/// If the source and destination will *never* overlap,
+/// [`copy_nonoverlapping`] can be used instead.
+///
+/// `copy` is semantically equivalent to C's [`memmove`], but with the argument
+/// order swapped. Copying takes place as if the bytes were copied from `src`
+/// to a temporary array and then copied from the array to `dst`.
+///
+/// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * Both `src` and `dst` must be properly aligned.
+///
+/// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values
+/// in the region beginning at `*src` and the region beginning at `*dst` can
+/// [violate memory safety][read-ownership].
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointers must be non-NULL and properly aligned.
+///
+/// [`read`]: crate::ptr::read
+/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Efficiently create a Rust vector from an unsafe buffer:
+///
+/// ```
+/// use std::ptr;
+///
+/// /// # Safety
+/// ///
+/// /// * `ptr` must be correctly aligned for its type and non-zero.
+/// /// * `ptr` must be valid for reads of `elts` contiguous elements of type `T`.
+/// /// * Those elements must not be used after calling this function unless `T: Copy`.
+/// # #[allow(dead_code)]
+/// unsafe fn from_buf_raw<T>(ptr: *const T, elts: usize) -> Vec<T> {
+/// let mut dst = Vec::with_capacity(elts);
+///
+/// // SAFETY: Our precondition ensures the source is aligned and valid,
+/// // and `Vec::with_capacity` ensures that we have usable space to write them.
+/// ptr::copy(ptr, dst.as_mut_ptr(), elts);
+///
+/// // SAFETY: We created it with this much capacity earlier,
+/// // and the previous `copy` has initialized these elements.
+/// dst.set_len(elts);
+/// dst
+/// }
+/// ```
+#[doc(alias = "memmove")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[inline]
+pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
+    extern "rust-intrinsic" {
+        fn copy<T>(src: *const T, dst: *mut T, count: usize);
+    }
+
+    // Debug builds only: reject null or misaligned pointers. Overlap is
+    // permitted here, unlike `copy_nonoverlapping`. Aborting rather than
+    // panicking keeps the codegen impact of this check small.
+    if cfg!(debug_assertions) {
+        let args_ok = is_aligned_and_not_null(src) && is_aligned_and_not_null(dst);
+        if !args_ok {
+            abort();
+        }
+    }
+
+    // SAFETY: the safety contract for `copy` must be upheld by the caller.
+    unsafe { copy(src, dst, count) }
+}
+
+/// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
+/// `val`.
+///
+/// `write_bytes` is similar to C's [`memset`], but sets `count *
+/// size_of::<T>()` bytes to `val`.
+///
+/// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes of `count * size_of::<T>()` bytes.
+///
+/// * `dst` must be properly aligned.
+///
+/// Additionally, the caller must ensure that writing `count *
+/// size_of::<T>()` bytes to the given region of memory results in a valid
+/// value of `T`. Using a region of memory typed as a `T` that contains an
+/// invalid value of `T` is undefined behavior.
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is
+/// `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: crate::ptr#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut vec = vec![0u32; 4];
+/// unsafe {
+///     let vec_ptr = vec.as_mut_ptr();
+///     ptr::write_bytes(vec_ptr, 0xfe, 2);
+/// }
+/// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]);
+/// ```
+///
+/// Creating an invalid value:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut v = Box::new(0i32);
+///
+/// unsafe {
+///     // Leaks the previously held value by overwriting the `Box<T>` with
+///     // a null pointer.
+///     ptr::write_bytes(&mut v as *mut Box<i32>, 0, 1);
+/// }
+///
+/// // At this point, using or dropping `v` results in undefined behavior.
+/// // drop(v); // ERROR
+///
+/// // Even leaking `v` "uses" it, and hence is undefined behavior.
+/// // mem::forget(v); // ERROR
+///
+/// // In fact, `v` is invalid according to basic type layout invariants, so *any*
+/// // operation touching it is undefined behavior.
+/// // let v2 = v; // ERROR
+///
+/// unsafe {
+///     // Let us instead put in a valid value
+///     ptr::write(&mut v as *mut Box<i32>, Box::new(42i32));
+/// }
+///
+/// // Now the box is fine
+/// assert_eq!(*v, 42);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[inline]
+pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
+    // Re-declare the intrinsic locally; this wrapper is the public entry point.
+    extern "rust-intrinsic" {
+        fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+    }
+
+    // Debug-build-only sanity check (this one panics, unlike `copy` above).
+    debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");
+
+    // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
+    unsafe { write_bytes(dst, val, count) }
+}
--- /dev/null
+use crate::iter::{DoubleEndedIterator, FusedIterator, Iterator, TrustedLen};
+use crate::ops::Try;
+use crate::usize;
+
+/// An iterator that links two iterators together, in a chain.
+///
+/// This `struct` is created by [`Iterator::chain`]. See its documentation
+/// for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::iter::Chain;
+/// use std::slice::Iter;
+///
+/// let a1 = [1, 2, 3];
+/// let a2 = [4, 5, 6];
+/// let iter: Chain<Iter<_>, Iter<_>> = a1.iter().chain(a2.iter());
+/// ```
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Chain<A, B> {
+    // These are "fused" with `Option` so we don't need separate state to track which part is
+    // already exhausted, and we may also get niche layout for `None`. We don't use the real `Fuse`
+    // adapter because its specialization for `FusedIterator` unconditionally descends into the
+    // iterator, and that could be expensive to keep revisiting stuff like nested chains. It also
+    // hurts compiler performance to add more iterator layers to `Chain`.
+    //
+    // Only the "first" iterator is actually set `None` when exhausted, depending on whether you
+    // iterate forward or backward. If you mix directions, then both sides may be `None`.
+    a: Option<A>, // front half; `None` means exhausted from the front
+    b: Option<B>, // back half; `None` means exhausted from the back
+}
+impl<A, B> Chain<A, B> {
+    /// Builds a `Chain` with both halves present, i.e. neither end exhausted yet.
+    pub(in super::super) fn new(a: A, b: B) -> Chain<A, B> {
+        let (a, b) = (Some(a), Some(b));
+        Chain { b, a }
+    }
+}
+
+/// Fuse the iterator if the expression is `None`.
+// Expands to: call `$call` on the named side if it is still `Some`,
+// and permanently drop that side (set it to `None`) the first time the
+// call returns `None`.
+macro_rules! fuse {
+    ($self:ident . $iter:ident . $($call:tt)+) => {
+        match $self.$iter {
+            Some(ref mut iter) => match iter.$($call)+ {
+                None => {
+                    // Exhausted: fuse this side so it is never polled again.
+                    $self.$iter = None;
+                    None
+                }
+                item => item,
+            },
+            None => None,
+        }
+    };
+}
+
+/// Try an iterator method without fusing,
+/// like an inline `.as_mut().and_then(...)`
+// Unlike `fuse!`, this never writes `None` back, so the side can be
+// polled again later (e.g. from the other direction).
+macro_rules! maybe {
+    ($self:ident . $iter:ident . $($call:tt)+) => {
+        match $self.$iter {
+            Some(ref mut iter) => iter.$($call)+,
+            None => None,
+        }
+    };
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> Iterator for Chain<A, B>
+where
+    A: Iterator,
+    B: Iterator<Item = A::Item>,
+{
+    type Item = A::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<A::Item> {
+        // Forward iteration fuses `a` (via `fuse!`) but only peeks at `b`
+        // (via `maybe!`), matching the struct's field invariant.
+        match fuse!(self.a.next()) {
+            None => maybe!(self.b.next()),
+            item => item,
+        }
+    }
+
+    #[inline]
+    #[rustc_inherit_overflow_checks]
+    fn count(self) -> usize {
+        let a_count = match self.a {
+            Some(a) => a.count(),
+            None => 0,
+        };
+        let b_count = match self.b {
+            Some(b) => b.count(),
+            None => 0,
+        };
+        // The sum inherits the caller crate's overflow-check setting
+        // via the attribute above.
+        a_count + b_count
+    }
+
+    fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if let Some(ref mut a) = self.a {
+            // `?` returns early on short-circuit, leaving `a` intact so
+            // iteration can resume; fusing only happens on completion.
+            acc = a.try_fold(acc, &mut f)?;
+            self.a = None;
+        }
+        if let Some(ref mut b) = self.b {
+            acc = b.try_fold(acc, f)?;
+            // we don't fuse the second iterator
+        }
+        try { acc }
+    }
+
+    fn fold<Acc, F>(self, mut acc: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if let Some(a) = self.a {
+            acc = a.fold(acc, &mut f);
+        }
+        if let Some(b) = self.b {
+            acc = b.fold(acc, f);
+        }
+        acc
+    }
+
+    #[inline]
+    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+        // `rem` counts how many elements still need to be skipped.
+        let mut rem = n;
+
+        if let Some(ref mut a) = self.a {
+            match a.advance_by(rem) {
+                Ok(()) => return Ok(()),
+                // `a` only advanced `k` elements before running out.
+                Err(k) => rem -= k,
+            }
+            self.a = None;
+        }
+
+        if let Some(ref mut b) = self.b {
+            match b.advance_by(rem) {
+                Ok(()) => return Ok(()),
+                Err(k) => rem -= k,
+            }
+            // we don't fuse the second iterator
+        }
+
+        if rem == 0 { Ok(()) } else { Err(n - rem) }
+    }
+
+    #[inline]
+    fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
+        if let Some(ref mut a) = self.a {
+            match a.advance_by(n) {
+                // `a` had at least `n` more elements, so the `n`th overall
+                // (if any) is `a`'s next item.
+                Ok(()) => match a.next() {
+                    // `a` held exactly `n` elements: the answer is `b`'s first.
+                    None => n = 0,
+                    x => return x,
+                },
+                Err(k) => n -= k,
+            }
+
+            self.a = None;
+        }
+
+        maybe!(self.b.nth(n))
+    }
+
+    #[inline]
+    fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        match fuse!(self.a.find(&mut predicate)) {
+            None => maybe!(self.b.find(predicate)),
+            item => item,
+        }
+    }
+
+    #[inline]
+    fn last(self) -> Option<A::Item> {
+        // Must exhaust a before b.
+        let a_last = match self.a {
+            Some(a) => a.last(),
+            None => None,
+        };
+        let b_last = match self.b {
+            Some(b) => b.last(),
+            None => None,
+        };
+        // Prefer `b`'s last element; fall back to `a`'s if `b` was empty.
+        b_last.or(a_last)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        match self {
+            Chain { a: Some(a), b: Some(b) } => {
+                let (a_lower, a_upper) = a.size_hint();
+                let (b_lower, b_upper) = b.size_hint();
+
+                let lower = a_lower.saturating_add(b_lower);
+
+                // Upper bound is only known when both are known and their
+                // sum does not overflow.
+                let upper = match (a_upper, b_upper) {
+                    (Some(x), Some(y)) => x.checked_add(y),
+                    _ => None,
+                };
+
+                (lower, upper)
+            }
+            Chain { a: Some(a), b: None } => a.size_hint(),
+            Chain { a: None, b: Some(b) } => b.size_hint(),
+            Chain { a: None, b: None } => (0, Some(0)),
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> DoubleEndedIterator for Chain<A, B>
+where
+    A: DoubleEndedIterator,
+    B: DoubleEndedIterator<Item = A::Item>,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<A::Item> {
+        // Mirror of `next`: from the back, `b` is the "first" side and gets
+        // fused; `a` is only probed without fusing.
+        match fuse!(self.b.next_back()) {
+            None => maybe!(self.a.next_back()),
+            item => item,
+        }
+    }
+
+    #[inline]
+    fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+        // `rem` counts how many elements still need to be skipped from the back.
+        let mut rem = n;
+
+        if let Some(ref mut b) = self.b {
+            match b.advance_back_by(rem) {
+                Ok(()) => return Ok(()),
+                Err(k) => rem -= k,
+            }
+            self.b = None;
+        }
+
+        if let Some(ref mut a) = self.a {
+            match a.advance_back_by(rem) {
+                Ok(()) => return Ok(()),
+                Err(k) => rem -= k,
+            }
+            // we don't fuse the second iterator
+        }
+
+        if rem == 0 { Ok(()) } else { Err(n - rem) }
+    }
+
+    #[inline]
+    fn nth_back(&mut self, mut n: usize) -> Option<Self::Item> {
+        if let Some(ref mut b) = self.b {
+            match b.advance_back_by(n) {
+                // `b` had at least `n` more elements from the back.
+                Ok(()) => match b.next_back() {
+                    // `b` held exactly `n`: the answer is `a`'s last element.
+                    None => n = 0,
+                    x => return x,
+                },
+                Err(k) => n -= k,
+            }
+
+            self.b = None;
+        }
+
+        maybe!(self.a.nth_back(n))
+    }
+
+    #[inline]
+    fn rfind<P>(&mut self, mut predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        match fuse!(self.b.rfind(&mut predicate)) {
+            None => maybe!(self.a.rfind(predicate)),
+            item => item,
+        }
+    }
+
+    fn try_rfold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if let Some(ref mut b) = self.b {
+            // `?` may return early; `b` is only fused after full completion.
+            acc = b.try_rfold(acc, &mut f)?;
+            self.b = None;
+        }
+        if let Some(ref mut a) = self.a {
+            acc = a.try_rfold(acc, f)?;
+            // we don't fuse the second iterator
+        }
+        try { acc }
+    }
+
+    fn rfold<Acc, F>(self, mut acc: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if let Some(b) = self.b {
+            acc = b.rfold(acc, &mut f);
+        }
+        if let Some(a) = self.a {
+            acc = a.rfold(acc, f);
+        }
+        acc
+    }
+}
+
+// Note: *both* must be fused to handle double-ended iterators.
+// (Since either side can be the "un-fused" one depending on direction,
+// each side must itself keep returning `None` once exhausted.)
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A, B> FusedIterator for Chain<A, B>
+where
+    A: FusedIterator,
+    B: FusedIterator<Item = A::Item>,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+// SAFETY rationale: `Chain::size_hint` combines two exact hints with
+// saturating/checked addition, so the combined hint stays exact
+// (the upper bound becomes `None` only on genuine overflow).
+unsafe impl<A, B> TrustedLen for Chain<A, B>
+where
+    A: TrustedLen,
+    B: TrustedLen<Item = A::Item>,
+{
+}
--- /dev/null
+use crate::fmt;
+use crate::ops::Try;
+
+use super::super::{DoubleEndedIterator, Fuse, FusedIterator, Iterator};
+use super::Map;
+
+/// An iterator that maps each element to an iterator, and yields the elements
+/// of the produced iterators.
+///
+/// This `struct` is created by [`Iterator::flat_map`]. See its documentation
+/// for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct FlatMap<I, U: IntoIterator, F> {
+    // `flat_map(f)` is implemented as `map(f)` + the shared `FlattenCompat`
+    // flattening machinery, so all iteration logic lives in one place.
+    inner: FlattenCompat<Map<I, F>, <U as IntoIterator>::IntoIter>,
+}
+impl<I: Iterator, U: IntoIterator, F: FnMut(I::Item) -> U> FlatMap<I, U, F> {
+    /// Wraps `iter.map(f)` in the shared flattening machinery.
+    pub(in super::super) fn new(iter: I, f: F) -> FlatMap<I, U, F> {
+        let mapped = iter.map(f);
+        FlatMap { inner: FlattenCompat::new(mapped) }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Manual impl rather than `derive`: the required bound is on `U::IntoIter`
+// (stored inside `FlattenCompat`), which a derive could not express.
+impl<I: Clone, U, F: Clone> Clone for FlatMap<I, U, F>
+where
+    U: Clone + IntoIterator<IntoIter: Clone>,
+{
+    fn clone(&self) -> Self {
+        FlatMap { inner: self.inner.clone() }
+    }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+// Manual impl for the same reason as `Clone`: the bound lives on `U::IntoIter`.
+impl<I: fmt::Debug, U, F> fmt::Debug for FlatMap<I, U, F>
+where
+    U: IntoIterator<IntoIter: fmt::Debug>,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("FlatMap").field("inner", &self.inner).finish()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Every method simply forwards to `FlattenCompat`, which holds the real logic.
+impl<I: Iterator, U: IntoIterator, F> Iterator for FlatMap<I, U, F>
+where
+    F: FnMut(I::Item) -> U,
+{
+    type Item = U::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<U::Item> {
+        self.inner.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.inner.try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.inner.fold(init, fold)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Forwarding impl; the back-to-front logic lives in `FlattenCompat`.
+impl<I: DoubleEndedIterator, U, F> DoubleEndedIterator for FlatMap<I, U, F>
+where
+    F: FnMut(I::Item) -> U,
+    U: IntoIterator<IntoIter: DoubleEndedIterator>,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<U::Item> {
+        self.inner.next_back()
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.inner.try_rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.inner.rfold(init, fold)
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// `FlattenCompat` already fuses the outer iterator internally, so fusedness
+// of the whole adapter follows from `I: FusedIterator`.
+impl<I, U, F> FusedIterator for FlatMap<I, U, F>
+where
+    I: FusedIterator,
+    U: IntoIterator,
+    F: FnMut(I::Item) -> U,
+{
+}
+
+/// An iterator that flattens one level of nesting in an iterator of things
+/// that can be turned into iterators.
+///
+/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`flatten`]: Iterator::flatten
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+pub struct Flatten<I: Iterator<Item: IntoIterator>> {
+    // Shares all iteration logic with `FlatMap` via `FlattenCompat`.
+    inner: FlattenCompat<I, <I::Item as IntoIterator>::IntoIter>,
+}
+
+impl<I: Iterator<Item: IntoIterator>> Flatten<I> {
+    /// Wraps the given iterator of iterables in the shared flattening machinery.
+    pub(in super::super) fn new(iter: I) -> Flatten<I> {
+        let inner = FlattenCompat::new(iter);
+        Flatten { inner }
+    }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+// Manual impl: the `U: Debug` bound applies to the inner iterator type,
+// which a derive could not express.
+impl<I, U> fmt::Debug for Flatten<I>
+where
+    I: fmt::Debug + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: fmt::Debug + Iterator,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Flatten").field("inner", &self.inner).finish()
+    }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+// Manual impl for the same reason as `Debug`: the bound is on the inner
+// iterator type `U`.
+impl<I, U> Clone for Flatten<I>
+where
+    I: Clone + Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: Clone + Iterator,
+{
+    fn clone(&self) -> Self {
+        Flatten { inner: self.inner.clone() }
+    }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+// Forwarding impl; all real logic is in `FlattenCompat`.
+impl<I, U> Iterator for Flatten<I>
+where
+    I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: Iterator,
+{
+    type Item = U::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<U::Item> {
+        self.inner.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.inner.try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.inner.fold(init, fold)
+    }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+// Forwarding impl; back-to-front logic is in `FlattenCompat`.
+impl<I, U> DoubleEndedIterator for Flatten<I>
+where
+    I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: DoubleEndedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<U::Item> {
+        self.inner.next_back()
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        self.inner.try_rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.inner.rfold(init, fold)
+    }
+}
+
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+// `FlattenCompat` internally fuses the outer iterator, so fusedness of the
+// whole adapter follows from `I: FusedIterator`.
+impl<I, U> FusedIterator for Flatten<I>
+where
+    I: FusedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: Iterator,
+{
+}
+
+/// Real logic of both `Flatten` and `FlatMap` which simply delegate to
+/// this type.
+#[derive(Clone, Debug)]
+struct FlattenCompat<I, U> {
+    iter: Fuse<I>,      // outer iterator, fused so mixed-direction use is safe
+    frontiter: Option<U>, // partially-consumed inner iterator at the front
+    backiter: Option<U>,  // partially-consumed inner iterator at the back
+}
+impl<I, U> FlattenCompat<I, U>
+where
+    I: Iterator,
+{
+    /// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`.
+    fn new(iter: I) -> FlattenCompat<I, U> {
+        let fused = iter.fuse();
+        FlattenCompat { frontiter: None, backiter: None, iter: fused }
+    }
+}
+
+impl<I, U> Iterator for FlattenCompat<I, U>
+where
+    I: Iterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: Iterator,
+{
+    type Item = U::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<U::Item> {
+        loop {
+            // Drain the current front inner iterator first.
+            if let Some(ref mut inner) = self.frontiter {
+                match inner.next() {
+                    None => self.frontiter = None,
+                    elt @ Some(_) => return elt,
+                }
+            }
+            match self.iter.next() {
+                // Outer exhausted: fall through to whatever the back half
+                // still holds (relevant after mixed-direction iteration).
+                None => return self.backiter.as_mut()?.next(),
+                Some(inner) => self.frontiter = Some(inner.into_iter()),
+            }
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), U::size_hint);
+        let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), U::size_hint);
+        let lo = flo.saturating_add(blo);
+        // An exact upper bound is only possible when the outer iterator is
+        // empty (each pending outer item could yield arbitrarily many).
+        match (self.iter.size_hint(), fhi, bhi) {
+            ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)),
+            _ => (lo, None),
+        }
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        #[inline]
+        fn flatten<'a, T: IntoIterator, Acc, R: Try<Ok = Acc>>(
+            frontiter: &'a mut Option<T::IntoIter>,
+            fold: &'a mut impl FnMut(Acc, T::Item) -> R,
+        ) -> impl FnMut(Acc, T) -> R + 'a {
+            move |acc, x| {
+                let mut mid = x.into_iter();
+                let r = mid.try_fold(acc, &mut *fold);
+                // Stash the (possibly partially-consumed) inner iterator back
+                // so iteration can resume here if `fold` short-circuited.
+                *frontiter = Some(mid);
+                r
+            }
+        }
+
+        if let Some(ref mut front) = self.frontiter {
+            init = front.try_fold(init, &mut fold)?;
+        }
+        // Only reached if the front ran to completion (`?` returns otherwise).
+        self.frontiter = None;
+
+        init = self.iter.try_fold(init, flatten(&mut self.frontiter, &mut fold))?;
+        self.frontiter = None;
+
+        if let Some(ref mut back) = self.backiter {
+            init = back.try_fold(init, &mut fold)?;
+        }
+        self.backiter = None;
+
+        try { init }
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, ref mut fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        #[inline]
+        fn flatten<U: Iterator, Acc>(
+            fold: &mut impl FnMut(Acc, U::Item) -> Acc,
+        ) -> impl FnMut(Acc, U) -> Acc + '_ {
+            move |acc, iter| iter.fold(acc, &mut *fold)
+        }
+
+        // Consuming fold: front remainder, then all outer items, then the
+        // back remainder, in order.
+        self.frontiter
+            .into_iter()
+            .chain(self.iter.map(IntoIterator::into_iter))
+            .chain(self.backiter)
+            .fold(init, flatten(fold))
+    }
+}
+
+impl<I, U> DoubleEndedIterator for FlattenCompat<I, U>
+where
+    I: DoubleEndedIterator<Item: IntoIterator<IntoIter = U, Item = U::Item>>,
+    U: DoubleEndedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<U::Item> {
+        // Mirror of `next`: drain the back inner iterator, then pull the next
+        // outer item from the back, falling through to the front remainder.
+        loop {
+            if let Some(ref mut inner) = self.backiter {
+                match inner.next_back() {
+                    None => self.backiter = None,
+                    elt @ Some(_) => return elt,
+                }
+            }
+            match self.iter.next_back() {
+                None => return self.frontiter.as_mut()?.next_back(),
+                next => self.backiter = next.map(IntoIterator::into_iter),
+            }
+        }
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        #[inline]
+        fn flatten<'a, T: IntoIterator, Acc, R: Try<Ok = Acc>>(
+            backiter: &'a mut Option<T::IntoIter>,
+            fold: &'a mut impl FnMut(Acc, T::Item) -> R,
+        ) -> impl FnMut(Acc, T) -> R + 'a
+        where
+            T::IntoIter: DoubleEndedIterator,
+        {
+            move |acc, x| {
+                let mut mid = x.into_iter();
+                let r = mid.try_rfold(acc, &mut *fold);
+                // Stash the inner iterator back so a short-circuit can resume.
+                *backiter = Some(mid);
+                r
+            }
+        }
+
+        if let Some(ref mut back) = self.backiter {
+            init = back.try_rfold(init, &mut fold)?;
+        }
+        // Only reached if the back ran to completion (`?` returns otherwise).
+        self.backiter = None;
+
+        init = self.iter.try_rfold(init, flatten(&mut self.backiter, &mut fold))?;
+        self.backiter = None;
+
+        if let Some(ref mut front) = self.frontiter {
+            init = front.try_rfold(init, &mut fold)?;
+        }
+        self.frontiter = None;
+
+        try { init }
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, ref mut fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        #[inline]
+        fn flatten<U: DoubleEndedIterator, Acc>(
+            fold: &mut impl FnMut(Acc, U::Item) -> Acc,
+        ) -> impl FnMut(Acc, U) -> Acc + '_ {
+            move |acc, iter| iter.rfold(acc, &mut *fold)
+        }
+
+        // Consuming rfold over the same chain as `fold`, traversed backwards.
+        self.frontiter
+            .into_iter()
+            .chain(self.iter.map(IntoIterator::into_iter))
+            .chain(self.backiter)
+            .rfold(init, flatten(fold))
+    }
+}
--- /dev/null
+use super::InPlaceIterable;
+use crate::intrinsics;
+use crate::iter::adapters::zip::try_get_unchecked;
+use crate::iter::adapters::SourceIter;
+use crate::iter::TrustedRandomAccess;
+use crate::iter::{DoubleEndedIterator, ExactSizeIterator, FusedIterator, Iterator};
+use crate::ops::Try;
+
+/// An iterator that yields `None` forever after the underlying iterator
+/// yields `None` once.
+///
+/// This `struct` is created by [`Iterator::fuse`]. See its documentation
+/// for more.
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Fuse<I> {
+    // NOTE: for `I: FusedIterator`, this is always assumed `Some`!
+    // (The specialized `FuseImpl` below never writes `None` here.)
+    iter: Option<I>,
+}
+impl<I> Fuse<I> {
+    /// Wraps `iter`, starting in the not-yet-exhausted (`Some`) state.
+    pub(in crate::iter) fn new(iter: I) -> Fuse<I> {
+        let iter = Some(iter);
+        Fuse { iter }
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// Fusing is the whole point of this adapter, so any wrapped `Iterator` qualifies.
+impl<I> FusedIterator for Fuse<I> where I: Iterator {}
+
+/// Fuse the iterator if the expression is `None`.
+// Used by the *general* `FuseImpl` below: polls `iter` while it is `Some`,
+// and permanently sets it to `None` the first time `$call` returns `None`.
+macro_rules! fuse {
+    ($self:ident . iter . $($call:tt)+) => {
+        match $self.iter {
+            Some(ref mut iter) => match iter.$($call)+ {
+                None => {
+                    // Exhausted: fuse so the iterator is never polled again.
+                    $self.iter = None;
+                    None
+                }
+                item => item,
+            },
+            None => None,
+        }
+    };
+}
+
+// NOTE: for `I: FusedIterator`, we assume that the iterator is always `Some`.
+// Implementing this as a directly-expanded macro helps codegen performance.
+// Yields the inner iterator by-value/by-ref depending on the match ergonomics
+// of the call site.
+macro_rules! unchecked {
+    ($self:ident) => {
+        match $self {
+            Fuse { iter: Some(iter) } => iter,
+            // SAFETY: the specialized iterator never sets `None`
+            Fuse { iter: None } => unsafe { intrinsics::unreachable() },
+        }
+    };
+}
+
+// Any implementation here is made internal to avoid exposing default fns outside this trait
+// All methods dispatch through `FuseImpl`, which is specialized on
+// `I: FusedIterator` to skip the `Option` bookkeeping.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Fuse<I>
+where
+    I: Iterator,
+{
+    type Item = <I as Iterator>::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        FuseImpl::next(self)
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        FuseImpl::nth(self, n)
+    }
+
+    #[inline]
+    fn last(self) -> Option<Self::Item> {
+        FuseImpl::last(self)
+    }
+
+    #[inline]
+    fn count(self) -> usize {
+        FuseImpl::count(self)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        FuseImpl::size_hint(self)
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        FuseImpl::try_fold(self, acc, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        FuseImpl::fold(self, acc, fold)
+    }
+
+    #[inline]
+    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        FuseImpl::find(self, predicate)
+    }
+
+    #[inline]
+    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
+    where
+        Self: TrustedRandomAccess,
+    {
+        match self.iter {
+            // SAFETY: the caller must uphold the contract for
+            // `Iterator::__iterator_get_unchecked`.
+            Some(ref mut iter) => unsafe { try_get_unchecked(iter, idx) },
+            // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
+            None => unsafe { intrinsics::unreachable() },
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Like the forward impl, everything dispatches through `FuseImpl`.
+impl<I> DoubleEndedIterator for Fuse<I>
+where
+    I: DoubleEndedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
+        FuseImpl::next_back(self)
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
+        FuseImpl::nth_back(self, n)
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        FuseImpl::try_rfold(self, acc, fold)
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        FuseImpl::rfold(self, acc, fold)
+    }
+
+    #[inline]
+    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        FuseImpl::rfind(self, predicate)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Dispatches through `FuseImpl` so the `FusedIterator` specialization applies.
+impl<I> ExactSizeIterator for Fuse<I>
+where
+    I: ExactSizeIterator,
+{
+    fn len(&self) -> usize {
+        FuseImpl::len(self)
+    }
+
+    fn is_empty(&self) -> bool {
+        FuseImpl::is_empty(self)
+    }
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+// SAFETY rationale: random access simply forwards to `I` (see
+// `__iterator_get_unchecked` above), so `I`'s guarantee carries over.
+unsafe impl<I> TrustedRandomAccess for Fuse<I>
+where
+    I: TrustedRandomAccess,
+{
+    fn may_have_side_effect() -> bool {
+        I::may_have_side_effect()
+    }
+}
+
+// Fuse specialization trait
+// Internal trait so `Fuse` can have two method sets: a general one that
+// tracks exhaustion through `Option`, and a specialized one for
+// `I: FusedIterator` that skips the checks entirely.
+#[doc(hidden)]
+trait FuseImpl<I> {
+    type Item;
+
+    // Functions specific to any normal Iterators
+    fn next(&mut self) -> Option<Self::Item>;
+    fn nth(&mut self, n: usize) -> Option<Self::Item>;
+    fn last(self) -> Option<Self::Item>;
+    fn count(self) -> usize;
+    fn size_hint(&self) -> (usize, Option<usize>);
+    fn try_fold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>;
+    fn fold<Acc, Fold>(self, acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc;
+    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool;
+
+    // Functions specific to DoubleEndedIterators
+    fn next_back(&mut self) -> Option<Self::Item>
+    where
+        I: DoubleEndedIterator;
+    fn nth_back(&mut self, n: usize) -> Option<Self::Item>
+    where
+        I: DoubleEndedIterator;
+    fn try_rfold<Acc, Fold, R>(&mut self, acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+        I: DoubleEndedIterator;
+    fn rfold<Acc, Fold>(self, acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+        I: DoubleEndedIterator;
+    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+        I: DoubleEndedIterator;
+
+    // Functions specific to ExactSizeIterator
+    fn len(&self) -> usize
+    where
+        I: ExactSizeIterator;
+    fn is_empty(&self) -> bool
+    where
+        I: ExactSizeIterator;
+}
+
+// General Fuse impl
+// Default (non-`FusedIterator`) path: every method checks `self.iter` and
+// sets it to `None` once the underlying iterator is exhausted.
+#[doc(hidden)]
+impl<I> FuseImpl<I> for Fuse<I>
+where
+    I: Iterator,
+{
+    type Item = <I as Iterator>::Item;
+
+    #[inline]
+    default fn next(&mut self) -> Option<<I as Iterator>::Item> {
+        fuse!(self.iter.next())
+    }
+
+    #[inline]
+    default fn nth(&mut self, n: usize) -> Option<I::Item> {
+        fuse!(self.iter.nth(n))
+    }
+
+    #[inline]
+    default fn last(self) -> Option<I::Item> {
+        match self.iter {
+            Some(iter) => iter.last(),
+            None => None,
+        }
+    }
+
+    #[inline]
+    default fn count(self) -> usize {
+        match self.iter {
+            Some(iter) => iter.count(),
+            None => 0,
+        }
+    }
+
+    #[inline]
+    default fn size_hint(&self) -> (usize, Option<usize>) {
+        match self.iter {
+            Some(ref iter) => iter.size_hint(),
+            None => (0, Some(0)),
+        }
+    }
+
+    #[inline]
+    default fn try_fold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if let Some(ref mut iter) = self.iter {
+            // On short-circuit `?` returns early, leaving `iter` intact so
+            // folding can resume; fusing happens only after completion.
+            acc = iter.try_fold(acc, fold)?;
+            self.iter = None;
+        }
+        try { acc }
+    }
+
+    #[inline]
+    default fn fold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if let Some(iter) = self.iter {
+            acc = iter.fold(acc, fold);
+        }
+        acc
+    }
+
+    #[inline]
+    default fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        fuse!(self.iter.find(predicate))
+    }
+
+    #[inline]
+    default fn next_back(&mut self) -> Option<<I as Iterator>::Item>
+    where
+        I: DoubleEndedIterator,
+    {
+        fuse!(self.iter.next_back())
+    }
+
+    #[inline]
+    default fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item>
+    where
+        I: DoubleEndedIterator,
+    {
+        fuse!(self.iter.nth_back(n))
+    }
+
+    #[inline]
+    default fn try_rfold<Acc, Fold, R>(&mut self, mut acc: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+        I: DoubleEndedIterator,
+    {
+        if let Some(ref mut iter) = self.iter {
+            acc = iter.try_rfold(acc, fold)?;
+            self.iter = None;
+        }
+        try { acc }
+    }
+
+    #[inline]
+    default fn rfold<Acc, Fold>(self, mut acc: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+        I: DoubleEndedIterator,
+    {
+        if let Some(iter) = self.iter {
+            acc = iter.rfold(acc, fold);
+        }
+        acc
+    }
+
+    #[inline]
+    default fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+        I: DoubleEndedIterator,
+    {
+        fuse!(self.iter.rfind(predicate))
+    }
+
+    #[inline]
+    default fn len(&self) -> usize
+    where
+        I: ExactSizeIterator,
+    {
+        match self.iter {
+            Some(ref iter) => iter.len(),
+            None => 0,
+        }
+    }
+
+    #[inline]
+    default fn is_empty(&self) -> bool
+    where
+        I: ExactSizeIterator,
+    {
+        match self.iter {
+            Some(ref iter) => iter.is_empty(),
+            None => true,
+        }
+    }
+}
+
+#[doc(hidden)]
+// Specialized path for `I: FusedIterator`: `iter` is never set to `None`
+// (see `unchecked!`), so all methods delegate directly with no checks.
+impl<I> FuseImpl<I> for Fuse<I>
+where
+    I: FusedIterator,
+{
+    #[inline]
+    fn next(&mut self) -> Option<<I as Iterator>::Item> {
+        unchecked!(self).next()
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        unchecked!(self).nth(n)
+    }
+
+    #[inline]
+    fn last(self) -> Option<I::Item> {
+        unchecked!(self).last()
+    }
+
+    #[inline]
+    fn count(self) -> usize {
+        unchecked!(self).count()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        unchecked!(self).size_hint()
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        unchecked!(self).try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        unchecked!(self).fold(init, fold)
+    }
+
+    #[inline]
+    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+    {
+        unchecked!(self).find(predicate)
+    }
+
+    #[inline]
+    fn next_back(&mut self) -> Option<<I as Iterator>::Item>
+    where
+        I: DoubleEndedIterator,
+    {
+        unchecked!(self).next_back()
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item>
+    where
+        I: DoubleEndedIterator,
+    {
+        unchecked!(self).nth_back(n)
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+        I: DoubleEndedIterator,
+    {
+        unchecked!(self).try_rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+        I: DoubleEndedIterator,
+    {
+        unchecked!(self).rfold(init, fold)
+    }
+
+    #[inline]
+    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+    where
+        P: FnMut(&Self::Item) -> bool,
+        I: DoubleEndedIterator,
+    {
+        unchecked!(self).rfind(predicate)
+    }
+
+    #[inline]
+    fn len(&self) -> usize
+    where
+        I: ExactSizeIterator,
+    {
+        unchecked!(self).len()
+    }
+
+    #[inline]
+    fn is_empty(&self) -> bool
+    where
+        I: ExactSizeIterator,
+    {
+        unchecked!(self).is_empty()
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// Only provided for `I: FusedIterator`, where `iter` is invariantly `Some`.
+unsafe impl<S: Iterator, I: FusedIterator> SourceIter for Fuse<I>
+where
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        match self.iter {
+            // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+            Some(ref mut iter) => unsafe { SourceIter::as_inner(iter) },
+            // SAFETY: the specialized iterator never sets `None`
+            None => unsafe { intrinsics::unreachable() },
+        }
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// NOTE(review): `Fuse` only ever yields items straight from `I`, so `I`'s
+// in-place guarantee presumably carries over — confirm against the
+// `InPlaceIterable` contract.
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Fuse<I> {}
--- /dev/null
+use crate::cmp;
+use crate::fmt;
+use crate::intrinsics;
+use crate::ops::{Add, AddAssign, ControlFlow, Try};
+
+use super::from_fn;
+use super::{
+ DoubleEndedIterator, ExactSizeIterator, FusedIterator, InPlaceIterable, Iterator, TrustedLen,
+};
+
+mod chain;
+mod flatten;
+mod fuse;
+mod zip;
+
+pub use self::chain::Chain;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::flatten::{FlatMap, Flatten};
+pub use self::fuse::Fuse;
+use self::zip::try_get_unchecked;
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::zip::TrustedRandomAccess;
+pub use self::zip::Zip;
+
+/// This trait provides transitive access to a source stage in an iterator-adapter pipeline
+/// under the conditions that
+/// * the iterator source `S` itself implements `SourceIter<Source = S>`
+/// * there is a delegating implementation of this trait for each adapter in the pipeline between
+/// the source and the pipeline consumer.
+///
+/// When the source is an owning iterator struct (commonly called `IntoIter`) then
+/// this can be useful for specializing [`FromIterator`] implementations or recovering the
+/// remaining elements after an iterator has been partially exhausted.
+///
+/// Note that implementations do not necessarily have to provide access to the inner-most
+/// source of a pipeline. A stateful intermediate adapter might eagerly evaluate a part
+/// of the pipeline and expose its internal storage as source.
+///
+/// The trait is unsafe because implementers must uphold additional safety properties.
+/// See [`as_inner`] for details.
+///
+/// # Examples
+///
+/// Retrieving a partially consumed source:
+///
+/// ```
+/// # #![feature(inplace_iteration)]
+/// # use std::iter::SourceIter;
+///
+/// let mut iter = vec![9, 9, 9].into_iter().map(|i| i * i);
+/// let _ = iter.next();
+/// let mut remainder = std::mem::replace(unsafe { iter.as_inner() }, Vec::new().into_iter());
+/// println!("n = {} elements remaining", remainder.len());
+/// ```
+///
+/// [`FromIterator`]: crate::iter::FromIterator
+/// [`as_inner`]: SourceIter::as_inner
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub unsafe trait SourceIter {
+ /// A source stage in an iterator pipeline.
+ type Source: Iterator;
+
+ /// Retrieve the source of an iterator pipeline.
+ ///
+ /// # Safety
+ ///
+ /// Implementations of must return the same mutable reference for their lifetime, unless
+ /// replaced by a caller.
+ /// Callers may only replace the reference when they stopped iteration and drop the
+ /// iterator pipeline after extracting the source.
+ ///
+ /// This means iterator adapters can rely on the source not changing during
+ /// iteration but they cannot rely on it in their Drop implementations.
+ ///
+ /// Implementing this method means adapters relinquish private-only access to their
+ /// source and can only rely on guarantees made based on method receiver types.
+ /// The lack of restricted access also requires that adapters must uphold the source's
+ /// public API even when they have access to its internals.
+ ///
+ /// Callers in turn must expect the source to be in any state that is consistent with
+ /// its public API since adapters sitting between it and the source have the same
+ /// access. In particular an adapter may have consumed more elements than strictly necessary.
+ ///
+ /// The overall goal of these requirements is to let the consumer of a pipeline use
+ /// * whatever remains in the source after iteration has stopped
+ /// * the memory that has become unused by advancing a consuming iterator
+ ///
+ /// [`next()`]: Iterator::next
+ unsafe fn as_inner(&mut self) -> &mut Self::Source;
+}
+
/// A double-ended iterator with the direction inverted.
///
/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`rev`]: Iterator::rev
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rev<T> {
    // The underlying iterator; the front/back roles are swapped by the
    // `Iterator`/`DoubleEndedIterator` impls for `Rev`.
    iter: T,
}
impl<T> Rev<T> {
    /// Wraps `iter` so it is traversed back-to-front (used by `Iterator::rev`).
    pub(super) fn new(iter: T) -> Rev<T> {
        Rev { iter }
    }
}
+
// Forward iteration over `Rev` is back iteration over `I`: every front
// method delegates to the corresponding `*_back`/`r*` method of `I`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Rev<I>
where
    I: DoubleEndedIterator,
{
    type Item = <I as Iterator>::Item;

    #[inline]
    fn next(&mut self) -> Option<<I as Iterator>::Item> {
        self.iter.next_back()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Reversal does not change how many elements remain.
        self.iter.size_hint()
    }

    #[inline]
    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
        self.iter.advance_back_by(n)
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
        self.iter.nth_back(n)
    }

    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.iter.try_rfold(init, f)
    }

    fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, f)
    }

    #[inline]
    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        P: FnMut(&Self::Item) -> bool,
    {
        self.iter.rfind(predicate)
    }
}
+
// The mirror image of the `Iterator` impl: back methods of `Rev` delegate
// to the front methods of `I`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> DoubleEndedIterator for Rev<I>
where
    I: DoubleEndedIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
        self.iter.next()
    }

    #[inline]
    fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
        self.iter.advance_by(n)
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
        self.iter.nth(n)
    }

    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.iter.try_fold(init, f)
    }

    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, f)
    }

    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        P: FnMut(&Self::Item) -> bool,
    {
        self.iter.find(predicate)
    }
}
+
// Reversal does not change the number of remaining elements, so both
// queries delegate directly to `I`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ExactSizeIterator for Rev<I>
where
    I: ExactSizeIterator + DoubleEndedIterator,
{
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
+
// Once the inner fused iterator is exhausted (from either end) it stays
// exhausted, so `Rev` is fused too.
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Rev<I> where I: FusedIterator + DoubleEndedIterator {}
+
// SAFETY: `Rev` reports the inner `size_hint` unchanged, so the exact-length
// guarantee of `I: TrustedLen` carries over.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<I> TrustedLen for Rev<I> where I: TrustedLen + DoubleEndedIterator {}
+
/// An iterator that copies the elements of an underlying iterator.
///
/// This `struct` is created by the [`copied`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`copied`]: Iterator::copied
/// [`Iterator`]: trait.Iterator.html
#[stable(feature = "iter_copied", since = "1.36.0")]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct Copied<I> {
    // Inner iterator yielding `&T`; the impls below turn each reference
    // into an owned `T` by copying.
    it: I,
}

impl<I> Copied<I> {
    /// Wraps `it` (used by `Iterator::copied`).
    pub(super) fn new(it: I) -> Copied<I> {
        Copied { it }
    }
}
+
/// Adapts a fold closure over owned `T` values into one over `&T`,
/// copying each element out of the reference before applying `f`.
fn copy_fold<T: Copy, Acc>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc {
    move |acc, elt| f(acc, *elt)
}
+
/// Adapts a fallible fold closure over owned `T` values into one over
/// `&T`, copying each element out of the reference before applying `f`.
fn copy_try_fold<T: Copy, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
    move |acc, elt| f(acc, *elt)
}
+
// Each method delegates to the inner iterator and copies the yielded
// reference into an owned `T` (via `Option::copied` or the helper
// closures `copy_fold`/`copy_try_fold`).
#[stable(feature = "iter_copied", since = "1.36.0")]
impl<'a, I, T: 'a> Iterator for Copied<I>
where
    I: Iterator<Item = &'a T>,
    T: Copy,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.it.next().copied()
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.it.size_hint()
    }

    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.it.try_fold(init, copy_try_fold(f))
    }

    fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.it.fold(init, copy_fold(f))
    }

    fn nth(&mut self, n: usize) -> Option<T> {
        self.it.nth(n).copied()
    }

    fn last(self) -> Option<T> {
        self.it.last().copied()
    }

    fn count(self) -> usize {
        // Counting never needs the element values, so no copies are made.
        self.it.count()
    }

    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T
    where
        Self: TrustedRandomAccess,
    {
        // SAFETY: the caller must uphold the contract for
        // `Iterator::__iterator_get_unchecked`.
        *unsafe { try_get_unchecked(&mut self.it, idx) }
    }
}
+
// Back-to-front variants: same copy-on-yield delegation as the forward impl.
#[stable(feature = "iter_copied", since = "1.36.0")]
impl<'a, I, T: 'a> DoubleEndedIterator for Copied<I>
where
    I: DoubleEndedIterator<Item = &'a T>,
    T: Copy,
{
    fn next_back(&mut self) -> Option<T> {
        self.it.next_back().copied()
    }

    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.it.try_rfold(init, copy_try_fold(f))
    }

    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.it.rfold(init, copy_fold(f))
    }
}
+
// Copying is one-for-one, so the length queries delegate unchanged.
#[stable(feature = "iter_copied", since = "1.36.0")]
impl<'a, I, T: 'a> ExactSizeIterator for Copied<I>
where
    I: ExactSizeIterator<Item = &'a T>,
    T: Copy,
{
    fn len(&self) -> usize {
        self.it.len()
    }

    fn is_empty(&self) -> bool {
        self.it.is_empty()
    }
}
+
// `Copied` yields exactly when the inner iterator yields, so fusedness
// carries over.
#[stable(feature = "iter_copied", since = "1.36.0")]
impl<'a, I, T: 'a> FusedIterator for Copied<I>
where
    I: FusedIterator<Item = &'a T>,
    T: Copy,
{
}
+
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<I> TrustedRandomAccess for Copied<I>
where
    I: TrustedRandomAccess,
{
    // Copying a `Copy` value cannot run user code, so only the inner
    // iterator's accesses can have side effects — forward the query.
    #[inline]
    fn may_have_side_effect() -> bool {
        I::may_have_side_effect()
    }
}
+
// SAFETY: `Copied` forwards `size_hint` unchanged, so the exact-length
// guarantee of `I: TrustedLen` is preserved.
#[stable(feature = "iter_copied", since = "1.36.0")]
unsafe impl<'a, I, T: 'a> TrustedLen for Copied<I>
where
    I: TrustedLen<Item = &'a T>,
    T: Copy,
{
}
+
/// An iterator that clones the elements of an underlying iterator.
///
/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`cloned`]: Iterator::cloned
/// [`Iterator`]: trait.Iterator.html
#[stable(feature = "iter_cloned", since = "1.1.0")]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Clone, Debug)]
pub struct Cloned<I> {
    // Inner iterator yielding `&T`; the impls below turn each reference
    // into an owned `T` by cloning.
    it: I,
}
impl<I> Cloned<I> {
    /// Wraps `it` (used by `Iterator::cloned`).
    pub(super) fn new(it: I) -> Cloned<I> {
        Cloned { it }
    }
}
+
/// Adapts a fallible fold closure over owned `T` values into one over
/// `&T`, cloning each element before applying `f`.
fn clone_try_fold<T: Clone, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
    move |acc, elt| {
        let owned = elt.clone();
        f(acc, owned)
    }
}
+
// Each method delegates to the inner iterator and clones the yielded
// reference into an owned `T`.
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> Iterator for Cloned<I>
where
    I: Iterator<Item = &'a T>,
    T: Clone,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        self.it.next().cloned()
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.it.size_hint()
    }

    fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.it.try_fold(init, clone_try_fold(f))
    }

    fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        // Folding consumes `self`, so the clone can be expressed as a `map`.
        self.it.map(T::clone).fold(init, f)
    }

    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> T
    where
        Self: TrustedRandomAccess,
    {
        // SAFETY: the caller must uphold the contract for
        // `Iterator::__iterator_get_unchecked`.
        unsafe { try_get_unchecked(&mut self.it, idx).clone() }
    }
}
+
// Back-to-front variants: same clone-on-yield delegation as the forward impl.
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> DoubleEndedIterator for Cloned<I>
where
    I: DoubleEndedIterator<Item = &'a T>,
    T: Clone,
{
    fn next_back(&mut self) -> Option<T> {
        self.it.next_back().cloned()
    }

    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> R,
        R: Try<Ok = B>,
    {
        self.it.try_rfold(init, clone_try_fold(f))
    }

    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        self.it.map(T::clone).rfold(init, f)
    }
}
+
// Cloning is one-for-one, so the length queries delegate unchanged.
#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I>
where
    I: ExactSizeIterator<Item = &'a T>,
    T: Clone,
{
    fn len(&self) -> usize {
        self.it.len()
    }

    fn is_empty(&self) -> bool {
        self.it.is_empty()
    }
}
+
// `Cloned` yields exactly when the inner iterator yields, so fusedness
// carries over.
#[stable(feature = "fused", since = "1.26.0")]
impl<'a, I, T: 'a> FusedIterator for Cloned<I>
where
    I: FusedIterator<Item = &'a T>,
    T: Clone,
{
}
+
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<I> TrustedRandomAccess for Cloned<I>
where
    I: TrustedRandomAccess,
{
    // Unconditionally `true` (unlike `Copied`): `T::clone` is user code
    // and may have arbitrary side effects.
    #[inline]
    fn may_have_side_effect() -> bool {
        true
    }
}
+
// SAFETY: `Cloned` forwards `size_hint` unchanged, so the exact-length
// guarantee of `I: TrustedLen` is preserved.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<'a, I, T: 'a> TrustedLen for Cloned<I>
where
    I: TrustedLen<Item = &'a T>,
    T: Clone,
{
}
+
/// An iterator that repeats endlessly.
///
/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`cycle`]: Iterator::cycle
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Cycle<I> {
    // Pristine copy of the iterator, cloned back into `iter` each time a
    // pass completes.
    orig: I,
    // The iterator currently being drained.
    iter: I,
}
impl<I: Clone> Cycle<I> {
    /// Wraps `iter`, keeping an untouched clone to restart from (used by
    /// `Iterator::cycle`).
    pub(super) fn new(iter: I) -> Cycle<I> {
        Cycle { orig: iter.clone(), iter }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> Iterator for Cycle<I>
where
    I: Clone + Iterator,
{
    type Item = <I as Iterator>::Item;

    #[inline]
    fn next(&mut self) -> Option<<I as Iterator>::Item> {
        match self.iter.next() {
            // Current pass exhausted: restart from the pristine copy. If the
            // restarted iterator is also empty, this correctly yields `None`.
            None => {
                self.iter = self.orig.clone();
                self.iter.next()
            }
            y => y,
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // the cycle iterator is either empty or infinite
        match self.orig.size_hint() {
            sz @ (0, Some(0)) => sz,
            (0, _) => (0, None),
            _ => (usize::MAX, None),
        }
    }

    #[inline]
    fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        // fully iterate the current iterator. this is necessary because
        // `self.iter` may be empty even when `self.orig` isn't
        acc = self.iter.try_fold(acc, &mut f)?;
        self.iter = self.orig.clone();

        // complete a full cycle, keeping track of whether the cycled
        // iterator is empty or not. we need to return early in case
        // of an empty iterator to prevent an infinite loop
        let mut is_empty = true;
        acc = self.iter.try_fold(acc, |acc, x| {
            is_empty = false;
            f(acc, x)
        })?;

        if is_empty {
            return try { acc };
        }

        // Non-empty source: loop forever (only a `Try` short-circuit from
        // `f` can exit).
        loop {
            self.iter = self.orig.clone();
            acc = self.iter.try_fold(acc, &mut f)?;
        }
    }

    // No `fold` override, because `fold` doesn't make much sense for `Cycle`,
    // and we can't do anything better than the default.
}
+
// A `Cycle` only ever returns `None` when its source is empty, and then it
// keeps returning `None` — so it is trivially fused.
#[stable(feature = "fused", since = "1.26.0")]
impl<I> FusedIterator for Cycle<I> where I: Clone + Iterator {}
+
/// An iterator for stepping iterators by a custom amount.
///
/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See
/// its documentation for more.
///
/// [`step_by`]: Iterator::step_by
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "iterator_step_by", since = "1.28.0")]
#[derive(Clone, Debug)]
pub struct StepBy<I> {
    iter: I,
    // NOTE: stored as `step - 1` (the number of elements to skip between
    // yields); this also makes `step + 1` in the impls overflow-free.
    step: usize,
    // True until the first element has been yielded; the first element is
    // never skipped.
    first_take: bool,
}
impl<I> StepBy<I> {
    /// Wraps `iter` to yield every `step`-th element (used by
    /// `Iterator::step_by`).
    ///
    /// # Panics
    ///
    /// Panics if `step` is zero.
    pub(super) fn new(iter: I, step: usize) -> StepBy<I> {
        assert!(step != 0);
        StepBy { iter, step: step - 1, first_take: true }
    }
}
+
#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> Iterator for StepBy<I>
where
    I: Iterator,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.first_take {
            // First call yields the very first element.
            self.first_take = false;
            self.iter.next()
        } else {
            // Skip `self.step` elements, yield the next (`nth` is 0-based).
            self.iter.nth(self.step)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Number of yields when the first element has not been taken yet:
        // 1 for the first element plus one per full stride thereafter.
        #[inline]
        fn first_size(step: usize) -> impl Fn(usize) -> usize {
            move |n| if n == 0 { 0 } else { 1 + (n - 1) / (step + 1) }
        }

        // Number of yields once striding has started: one per full stride.
        #[inline]
        fn other_size(step: usize) -> impl Fn(usize) -> usize {
            move |n| n / (step + 1)
        }

        let (low, high) = self.iter.size_hint();

        if self.first_take {
            let f = first_size(self.step);
            (f(low), high.map(f))
        } else {
            let f = other_size(self.step);
            (f(low), high.map(f))
        }
    }

    #[inline]
    fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
        if self.first_take {
            self.first_take = false;
            let first = self.iter.next();
            if n == 0 {
                return first;
            }
            n -= 1;
        }
        // n and self.step are indices, we need to add 1 to get the amount of elements
        // When calling `.nth`, we need to subtract 1 again to convert back to an index
        // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1`
        let mut step = self.step + 1;
        // n + 1 could overflow
        // thus, if n is usize::MAX, instead of adding one, we call .nth(step)
        if n == usize::MAX {
            self.iter.nth(step - 1);
        } else {
            n += 1;
        }

        // overflow handling
        loop {
            let mul = n.checked_mul(step);
            {
                if intrinsics::likely(mul.is_some()) {
                    return self.iter.nth(mul.unwrap() - 1);
                }
            }
            // `n * step` would overflow `usize`: advance by the largest
            // multiple of `step` (or of `n`) that fits, reduce the remaining
            // count accordingly, and retry.
            let div_n = usize::MAX / n;
            let div_step = usize::MAX / step;
            let nth_n = div_n * n;
            let nth_step = div_step * step;
            let nth = if nth_n > nth_step {
                step -= div_n;
                nth_n
            } else {
                n -= div_step;
                nth_step
            };
            self.iter.nth(nth - 1);
        }
    }

    fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        // Helper producing the "skip `step`, yield next" closure for `from_fn`.
        #[inline]
        fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return try { acc },
                Some(x) => acc = f(acc, x)?,
            }
        }
        from_fn(nth(&mut self.iter, self.step)).try_fold(acc, f)
    }

    fn fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
    where
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        // Helper producing the "skip `step`, yield next" closure for `from_fn`.
        #[inline]
        fn nth<I: Iterator>(iter: &mut I, step: usize) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth(step)
        }

        if self.first_take {
            self.first_take = false;
            match self.iter.next() {
                None => return acc,
                Some(x) => acc = f(acc, x),
            }
        }
        from_fn(nth(&mut self.iter, self.step)).fold(acc, f)
    }
}
+
impl<I> StepBy<I>
where
    I: ExactSizeIterator,
{
    // The zero-based index starting from the end of the iterator of the
    // last element. Used in the `DoubleEndedIterator` implementation.
    fn next_back_index(&self) -> usize {
        // `self.step + 1` is the stride; `rem` is how many trailing elements
        // don't fill a complete stride.
        let rem = self.iter.len() % (self.step + 1);
        if self.first_take {
            // Nothing consumed yet: the first element always counts, so a
            // zero remainder means the last yield sits a full stride from
            // the end.
            if rem == 0 { self.step } else { rem - 1 }
        } else {
            rem
        }
    }
}
+
// `ExactSizeIterator` is required so `next_back_index` can locate the last
// element this adapter would yield when iterating forwards.
#[stable(feature = "double_ended_step_by_iterator", since = "1.38.0")]
impl<I> DoubleEndedIterator for StepBy<I>
where
    I: DoubleEndedIterator + ExactSizeIterator,
{
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.nth_back(self.next_back_index())
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        // `self.iter.nth_back(usize::MAX)` does the right thing here when `n`
        // is out of bounds because the length of `self.iter` does not exceed
        // `usize::MAX` (because `I: ExactSizeIterator`) and `nth_back` is
        // zero-indexed
        let n = n.saturating_mul(self.step + 1).saturating_add(self.next_back_index());
        self.iter.nth_back(n)
    }

    fn try_rfold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
    where
        F: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        // Helper producing the "skip `step` from the back, yield next" closure.
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step)
        }

        match self.next_back() {
            None => try { init },
            Some(x) => {
                let acc = f(init, x)?;
                from_fn(nth_back(&mut self.iter, self.step)).try_fold(acc, f)
            }
        }
    }

    #[inline]
    fn rfold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
    where
        Self: Sized,
        F: FnMut(Acc, Self::Item) -> Acc,
    {
        // Helper producing the "skip `step` from the back, yield next" closure.
        #[inline]
        fn nth_back<I: DoubleEndedIterator>(
            iter: &mut I,
            step: usize,
        ) -> impl FnMut() -> Option<I::Item> + '_ {
            move || iter.nth_back(step)
        }

        match self.next_back() {
            None => init,
            Some(x) => {
                let acc = f(init, x);
                from_fn(nth_back(&mut self.iter, self.step)).fold(acc, f)
            }
        }
    }
}
+
// StepBy can only make the iterator shorter, so the len will still fit.
// (`len` itself comes from the default impl, which derives it from the
// `size_hint` computed above.)
#[stable(feature = "iterator_step_by", since = "1.28.0")]
impl<I> ExactSizeIterator for StepBy<I> where I: ExactSizeIterator {}
+
/// An iterator that maps the values of `iter` with `f`.
///
/// This `struct` is created by the [`map`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`map`]: Iterator::map
/// [`Iterator`]: trait.Iterator.html
///
/// # Notes about side effects
///
/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that
/// you can also [`map`] backwards:
///
/// ```rust
/// let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect();
///
/// assert_eq!(v, [4, 3, 2]);
/// ```
///
/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html
///
/// But if your closure has state, iterating backwards may act in a way you do
/// not expect. Let's go through an example. First, in the forward direction:
///
/// ```rust
/// let mut c = 0;
///
/// for pair in vec!['a', 'b', 'c'].into_iter()
///                                .map(|letter| { c += 1; (letter, c) }) {
///     println!("{:?}", pair);
/// }
/// ```
///
/// This will print "('a', 1), ('b', 2), ('c', 3)".
///
/// Now consider this twist where we add a call to `rev`. This version will
/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed,
/// but the values of the counter still go in order. This is because `map()` is
/// still being called lazily on each item, but we are popping items off the
/// back of the vector now, instead of shifting them from the front.
///
/// ```rust
/// let mut c = 0;
///
/// for pair in vec!['a', 'b', 'c'].into_iter()
///                                .map(|letter| { c += 1; (letter, c) })
///                                .rev() {
///     println!("{:?}", pair);
/// }
/// ```
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Map<I, F> {
    iter: I,
    f: F,
}
impl<I, F> Map<I, F> {
    /// Wraps `iter` with mapping closure `f` (used by `Iterator::map`).
    pub(super) fn new(iter: I, f: F) -> Map<I, F> {
        Map { iter, f }
    }
}
+
// Manual impl (rather than `derive`) because the closure `F` is not `Debug`;
// only the inner iterator is shown.
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, F> fmt::Debug for Map<I, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Map").field("iter", &self.iter).finish()
    }
}
+
/// Composes a mapping closure `f` with a fold closure `g`, producing a fold
/// closure over the pre-map element type `T`.
fn map_fold<T, B, Acc>(
    mut f: impl FnMut(T) -> B,
    mut g: impl FnMut(Acc, B) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
    move |acc, elt| {
        let mapped = f(elt);
        g(acc, mapped)
    }
}
+
/// Composes a mapping closure `f` (taken by reference so the caller keeps
/// ownership) with a fallible fold closure `g`, producing a fallible fold
/// closure over the pre-map element type `T`.
fn map_try_fold<'a, T, B, Acc, R>(
    f: &'a mut impl FnMut(T) -> B,
    mut g: impl FnMut(Acc, B) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
    move |acc, elt| {
        let mapped = f(elt);
        g(acc, mapped)
    }
}
+
// Each element of `iter` is passed through `self.f`; the fold overrides
// compose `f` with the caller's accumulator closure instead of paying for
// a per-element `Option` round-trip.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: Iterator, F> Iterator for Map<I, F>
where
    F: FnMut(I::Item) -> B,
{
    type Item = B;

    #[inline]
    fn next(&mut self) -> Option<B> {
        self.iter.next().map(&mut self.f)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Mapping is one-for-one; the count is unchanged.
        self.iter.size_hint()
    }

    fn try_fold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
    where
        Self: Sized,
        G: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_fold(init, map_try_fold(&mut self.f, g))
    }

    fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
    where
        G: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, map_fold(self.f, g))
    }

    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> B
    where
        Self: TrustedRandomAccess,
    {
        // SAFETY: the caller must uphold the contract for
        // `Iterator::__iterator_get_unchecked`.
        unsafe { (self.f)(try_get_unchecked(&mut self.iter, idx)) }
    }
}
+
// Back-to-front variants: identical mapping, delegating to the inner
// iterator's reverse methods.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for Map<I, F>
where
    F: FnMut(I::Item) -> B,
{
    #[inline]
    fn next_back(&mut self) -> Option<B> {
        self.iter.next_back().map(&mut self.f)
    }

    fn try_rfold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
    where
        Self: Sized,
        G: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_rfold(init, map_try_fold(&mut self.f, g))
    }

    fn rfold<Acc, G>(self, init: Acc, g: G) -> Acc
    where
        G: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, map_fold(self.f, g))
    }
}
+
// Mapping is one-for-one, so the length queries delegate unchanged.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: ExactSizeIterator, F> ExactSizeIterator for Map<I, F>
where
    F: FnMut(I::Item) -> B,
{
    fn len(&self) -> usize {
        self.iter.len()
    }

    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
+
// `Map` yields exactly when the inner iterator yields, so fusedness carries over.
#[stable(feature = "fused", since = "1.26.0")]
impl<B, I: FusedIterator, F> FusedIterator for Map<I, F> where F: FnMut(I::Item) -> B {}
+
// SAFETY: `Map` forwards `size_hint` unchanged, so the exact-length
// guarantee of `I: TrustedLen` is preserved.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<B, I, F> TrustedLen for Map<I, F>
where
    I: TrustedLen,
    F: FnMut(I::Item) -> B,
{
}
+
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<I, F> TrustedRandomAccess for Map<I, F>
where
    I: TrustedRandomAccess,
{
    // Unconditionally `true`: the mapping closure `F` is user code and may
    // have arbitrary side effects.
    #[inline]
    fn may_have_side_effect() -> bool {
        true
    }
}
+
// `Map` holds no elements of its own (just `iter` and `f`), so source
// access forwards straight through to the wrapped iterator.
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<S: Iterator, B, I: Iterator, F> SourceIter for Map<I, F>
where
    F: FnMut(I::Item) -> B,
    I: SourceIter<Source = S>,
{
    type Source = S;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut S {
        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
        unsafe { SourceIter::as_inner(&mut self.iter) }
    }
}
+
// `Map` transforms items one-for-one without retaining or reordering them,
// so `I`'s in-place-iteration guarantee is preserved.
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for Map<I, F> where F: FnMut(I::Item) -> B {}
+
/// An iterator that filters the elements of `iter` with `predicate`.
///
/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`filter`]: Iterator::filter
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct Filter<I, P> {
    iter: I,
    predicate: P,
}
impl<I, P> Filter<I, P> {
    /// Wraps `iter` with filtering closure `predicate` (used by
    /// `Iterator::filter`).
    pub(super) fn new(iter: I, predicate: P) -> Filter<I, P> {
        Filter { iter, predicate }
    }
}
+
// Manual impl (rather than `derive`) because the closure `P` is not `Debug`;
// only the inner iterator is shown.
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, P> fmt::Debug for Filter<I, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Filter").field("iter", &self.iter).finish()
    }
}
+
/// Composes a filter `predicate` with a fold closure, producing a fold
/// closure that accumulates only the elements the predicate accepts.
fn filter_fold<T, Acc>(
    mut predicate: impl FnMut(&T) -> bool,
    mut fold: impl FnMut(Acc, T) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
    move |acc, item| {
        // Rejected elements leave the accumulator untouched.
        if predicate(&item) {
            fold(acc, item)
        } else {
            acc
        }
    }
}
+
// Fallible counterpart of `filter_fold`: rejected elements short-circuit
// nothing — they just wrap the unchanged accumulator back into the `Try`
// type's success variant.
fn filter_try_fold<'a, T, Acc, R: Try<Ok = Acc>>(
    predicate: &'a mut impl FnMut(&T) -> bool,
    mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
    move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator, P> Iterator for Filter<I, P>
where
    P: FnMut(&I::Item) -> bool,
{
    type Item = I::Item;

    #[inline]
    fn next(&mut self) -> Option<I::Item> {
        // `find` advances the inner iterator to the first accepted element.
        self.iter.find(&mut self.predicate)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (_, upper) = self.iter.size_hint();
        (0, upper) // can't know a lower bound, due to the predicate
    }

    // this special case allows the compiler to make `.filter(_).count()`
    // branchless. Barring perfect branch prediction (which is unattainable in
    // the general case), this will be much faster in >90% of cases (containing
    // virtually all real workloads) and only a tiny bit slower in the rest.
    //
    // Having this specialization thus allows us to write `.filter(p).count()`
    // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is
    // less readable and also less backwards-compatible to Rust before 1.10.
    //
    // Using the branchless version will also simplify the LLVM byte code, thus
    // leaving more budget for LLVM optimizations.
    #[inline]
    fn count(self) -> usize {
        // Turns the predicate into a 0/1 mapper so counting becomes a sum.
        #[inline]
        fn to_usize<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize {
            move |x| predicate(&x) as usize
        }

        self.iter.map(to_usize(self.predicate)).sum()
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Self: Sized,
        Fold: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold))
    }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, filter_fold(self.predicate, fold))
    }
}
+
// Back-to-front variants: same filtering, delegating to the inner
// iterator's reverse methods.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: DoubleEndedIterator, P> DoubleEndedIterator for Filter<I, P>
where
    P: FnMut(&I::Item) -> bool,
{
    #[inline]
    fn next_back(&mut self) -> Option<I::Item> {
        self.iter.rfind(&mut self.predicate)
    }

    #[inline]
    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Self: Sized,
        Fold: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_rfold(init, filter_try_fold(&mut self.predicate, fold))
    }

    #[inline]
    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, filter_fold(self.predicate, fold))
    }
}
+
// `Filter` only returns `None` once the fused inner iterator does, so it
// stays fused.
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator, P> FusedIterator for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+
// `Filter` holds no elements of its own (just `iter` and `predicate`), so
// source access forwards straight through to the wrapped iterator.
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<S: Iterator, P, I: Iterator> SourceIter for Filter<I, P>
where
    P: FnMut(&I::Item) -> bool,
    I: SourceIter<Source = S>,
{
    type Source = S;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut S {
        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
        unsafe { SourceIter::as_inner(&mut self.iter) }
    }
}
+
// `Filter` can only drop items, never duplicate, retain, or reorder them,
// so `I`'s in-place-iteration guarantee is preserved.
#[unstable(issue = "none", feature = "inplace_iteration")]
unsafe impl<I: InPlaceIterable, P> InPlaceIterable for Filter<I, P> where P: FnMut(&I::Item) -> bool {}
+
/// An iterator that uses `f` to both filter and map elements from `iter`.
///
/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its
/// documentation for more.
///
/// [`filter_map`]: Iterator::filter_map
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct FilterMap<I, F> {
    iter: I,
    f: F,
}
impl<I, F> FilterMap<I, F> {
    /// Wraps `iter` with the combined filter/map closure `f` (used by
    /// `Iterator::filter_map`).
    pub(super) fn new(iter: I, f: F) -> FilterMap<I, F> {
        FilterMap { iter, f }
    }
}
+
// Manual impl (rather than `derive`) because the closure `F` is not `Debug`;
// only the inner iterator is shown.
#[stable(feature = "core_impl_debug", since = "1.9.0")]
impl<I: fmt::Debug, F> fmt::Debug for FilterMap<I, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("FilterMap").field("iter", &self.iter).finish()
    }
}
+
/// Composes a filter-map closure `f` with a fold closure, producing a fold
/// closure that accumulates only the elements `f` maps to `Some`.
fn filter_map_fold<T, B, Acc>(
    mut f: impl FnMut(T) -> Option<B>,
    mut fold: impl FnMut(Acc, B) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
    move |acc, item| {
        // `None` results leave the accumulator untouched.
        if let Some(mapped) = f(item) { fold(acc, mapped) } else { acc }
    }
}
+
// Fallible counterpart of `filter_map_fold`: a `None` from `f` wraps the
// unchanged accumulator back into the `Try` type's success variant.
fn filter_map_try_fold<'a, T, B, Acc, R: Try<Ok = Acc>>(
    f: &'a mut impl FnMut(T) -> Option<B>,
    mut fold: impl FnMut(Acc, B) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
    move |acc, item| match f(item) {
        Some(x) => fold(acc, x),
        None => try { acc },
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: Iterator, F> Iterator for FilterMap<I, F>
where
    F: FnMut(I::Item) -> Option<B>,
{
    type Item = B;

    #[inline]
    fn next(&mut self) -> Option<B> {
        // `find_map` advances the inner iterator to the first `Some` result.
        self.iter.find_map(&mut self.f)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (_, upper) = self.iter.size_hint();
        (0, upper) // can't know a lower bound, due to the predicate
    }

    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Self: Sized,
        Fold: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_fold(init, filter_map_try_fold(&mut self.f, fold))
    }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.fold(init, filter_map_fold(self.f, fold))
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<B, I: DoubleEndedIterator, F> DoubleEndedIterator for FilterMap<I, F>
where
    F: FnMut(I::Item) -> Option<B>,
{
    #[inline]
    fn next_back(&mut self) -> Option<B> {
        // There is no `rfind_map`, so this is expressed as a `try_rfold`
        // that breaks with the first mapped value found from the back.
        #[inline]
        fn find<T, B>(
            f: &mut impl FnMut(T) -> Option<B>,
        ) -> impl FnMut((), T) -> ControlFlow<B> + '_ {
            move |(), x| match f(x) {
                Some(x) => ControlFlow::Break(x),
                None => ControlFlow::CONTINUE,
            }
        }

        self.iter.try_rfold((), find(&mut self.f)).break_value()
    }

    #[inline]
    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Self: Sized,
        Fold: FnMut(Acc, Self::Item) -> R,
        R: Try<Ok = Acc>,
    {
        self.iter.try_rfold(init, filter_map_try_fold(&mut self.f, fold))
    }

    #[inline]
    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.iter.rfold(init, filter_map_fold(self.f, fold))
    }
}
+
+// `FilterMap` is fused whenever its inner iterator is: once the inner
+// iterator stays exhausted, `find_map` keeps returning `None`.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<B, I: FusedIterator, F> FusedIterator for FilterMap<I, F> where F: FnMut(I::Item) -> Option<B> {}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, B, I: Iterator, F> SourceIter for FilterMap<I, F>
+where
+    F: FnMut(I::Item) -> Option<B>,
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+// `FilterMap` yields at most one item per item consumed (see `next` above),
+// so in-place collection cannot outgrow the source allocation.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<B, I: InPlaceIterable, F> InPlaceIterable for FilterMap<I, F> where
+    F: FnMut(I::Item) -> Option<B>
+{
+}
+
+/// An iterator that yields the current count and the element during iteration.
+///
+/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`enumerate`]: Iterator::enumerate
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Enumerate<I> {
+    iter: I,
+    // Index that will be paired with the next element yielded from the front.
+    count: usize,
+}
+impl<I> Enumerate<I> {
+    // Crate-internal constructor used by `Iterator::enumerate`.
+    pub(super) fn new(iter: I) -> Enumerate<I> {
+        Enumerate { iter, count: 0 }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Enumerate<I>
+where
+    I: Iterator,
+{
+    type Item = (usize, <I as Iterator>::Item);
+
+    /// # Overflow Behavior
+    ///
+    /// The method does no guarding against overflows, so enumerating more than
+    /// `usize::MAX` elements either produces the wrong result or panics. If
+    /// debug assertions are enabled, a panic is guaranteed.
+    ///
+    /// # Panics
+    ///
+    /// Might panic if the index of the element overflows a `usize`.
+    #[inline]
+    fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
+        // Pull the element first: if the inner iterator is exhausted the
+        // count must not advance.
+        let a = self.iter.next()?;
+        let i = self.count;
+        // Possible undefined overflow.
+        AddAssign::add_assign(&mut self.count, 1);
+        Some((i, a))
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Enumerating does not change the number of elements.
+        self.iter.size_hint()
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
+        let a = self.iter.nth(n)?;
+        // Possible undefined overflow.
+        let i = Add::add(self.count, n);
+        self.count = Add::add(i, 1);
+        Some((i, a))
+    }
+
+    #[inline]
+    fn count(self) -> usize {
+        // The index is irrelevant for counting; forward directly.
+        self.iter.count()
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Borrows `count` so the index survives an early `Break` and stays
+        // correct for subsequent calls.
+        #[inline]
+        fn enumerate<'a, T, Acc, R>(
+            count: &'a mut usize,
+            mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
+        ) -> impl FnMut(Acc, T) -> R + 'a {
+            move |acc, item| {
+                let acc = fold(acc, (*count, item));
+                // Possible undefined overflow.
+                AddAssign::add_assign(count, 1);
+                acc
+            }
+        }
+
+        self.iter.try_fold(init, enumerate(&mut self.count, fold))
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // `fold` consumes `self`, so the count can be moved into the closure.
+        #[inline]
+        fn enumerate<T, Acc>(
+            mut count: usize,
+            mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
+        ) -> impl FnMut(Acc, T) -> Acc {
+            move |acc, item| {
+                let acc = fold(acc, (count, item));
+                // Possible undefined overflow.
+                AddAssign::add_assign(&mut count, 1);
+                acc
+            }
+        }
+
+        self.iter.fold(init, enumerate(self.count, fold))
+    }
+
+    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item
+    where
+        Self: TrustedRandomAccess,
+    {
+        // SAFETY: the caller must uphold the contract for
+        // `Iterator::__iterator_get_unchecked`.
+        let value = unsafe { try_get_unchecked(&mut self.iter, idx) };
+        // Random access pairs the element with `count + idx` without
+        // mutating `count`.
+        (Add::add(self.count, idx), value)
+    }
+}
+
+// Iterating from the back needs `ExactSizeIterator`: the index of the last
+// element is `count + len`, which requires knowing the remaining length.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Enumerate<I>
+where
+    I: ExactSizeIterator + DoubleEndedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
+        let a = self.iter.next_back()?;
+        // `len` is queried after `next_back`, so it is already the index
+        // offset of the element just removed.
+        let len = self.iter.len();
+        // Can safely add, `ExactSizeIterator` promises that the number of
+        // elements fits into a `usize`.
+        Some((self.count + len, a))
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<(usize, <I as Iterator>::Item)> {
+        let a = self.iter.nth_back(n)?;
+        let len = self.iter.len();
+        // Can safely add, `ExactSizeIterator` promises that the number of
+        // elements fits into a `usize`.
+        Some((self.count + len, a))
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Can safely add and subtract the count, as `ExactSizeIterator` promises
+        // that the number of elements fits into a `usize`.
+        fn enumerate<T, Acc, R>(
+            mut count: usize,
+            mut fold: impl FnMut(Acc, (usize, T)) -> R,
+        ) -> impl FnMut(Acc, T) -> R {
+            move |acc, item| {
+                // Decrement first: `count` starts one past the last index.
+                count -= 1;
+                fold(acc, (count, item))
+            }
+        }
+
+        let count = self.count + self.iter.len();
+        self.iter.try_rfold(init, enumerate(count, fold))
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // Can safely add and subtract the count, as `ExactSizeIterator` promises
+        // that the number of elements fits into a `usize`.
+        fn enumerate<T, Acc>(
+            mut count: usize,
+            mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
+        ) -> impl FnMut(Acc, T) -> Acc {
+            move |acc, item| {
+                count -= 1;
+                fold(acc, (count, item))
+            }
+        }
+
+        let count = self.count + self.iter.len();
+        self.iter.rfold(init, enumerate(count, fold))
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Enumerate<I>
+where
+    I: ExactSizeIterator,
+{
+    // Enumerating adds an index but never changes the number of elements,
+    // so length queries simply forward to the inner iterator.
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+
+    fn is_empty(&self) -> bool {
+        self.iter.is_empty()
+    }
+}
+
+// Random access forwards to the inner iterator (see
+// `__iterator_get_unchecked` above), so trustworthiness is inherited.
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<I> TrustedRandomAccess for Enumerate<I>
+where
+    I: TrustedRandomAccess,
+{
+    fn may_have_side_effect() -> bool {
+        I::may_have_side_effect()
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen {}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, I: Iterator> SourceIter for Enumerate<I>
+where
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
+
+/// An iterator with a `peek()` that returns an optional reference to the next
+/// element.
+///
+/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`peekable`]: Iterator::peekable
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Peekable<I: Iterator> {
+    iter: I,
+    /// Remember a peeked value, even if it was None.
+    // `None` = nothing buffered; `Some(None)` = a peek observed exhaustion;
+    // `Some(Some(v))` = `v` is buffered and must be yielded before `iter`.
+    peeked: Option<Option<I::Item>>,
+}
+impl<I: Iterator> Peekable<I> {
+    // Crate-internal constructor used by `Iterator::peekable`.
+    pub(super) fn new(iter: I) -> Peekable<I> {
+        Peekable { iter, peeked: None }
+    }
+}
+
+// Peekable must remember if a None has been seen in the `.peek()` method.
+// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the
+// underlying iterator at most once. This does not by itself make the iterator
+// fused.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator> Iterator for Peekable<I> {
+    type Item = I::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<I::Item> {
+        // Drain the buffered slot first; only touch the inner iterator when
+        // nothing was peeked.
+        match self.peeked.take() {
+            Some(v) => v,
+            None => self.iter.next(),
+        }
+    }
+
+    #[inline]
+    #[rustc_inherit_overflow_checks]
+    fn count(mut self) -> usize {
+        match self.peeked.take() {
+            // A peeked `None` means the iterator was already exhausted.
+            Some(None) => 0,
+            // A buffered element counts as one extra.
+            Some(Some(_)) => 1 + self.iter.count(),
+            None => self.iter.count(),
+        }
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        match self.peeked.take() {
+            Some(None) => None,
+            // The buffered element is index 0; deeper indices shift by one.
+            Some(v @ Some(_)) if n == 0 => v,
+            Some(Some(_)) => self.iter.nth(n - 1),
+            None => self.iter.nth(n),
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<I::Item> {
+        let peek_opt = match self.peeked.take() {
+            Some(None) => return None,
+            Some(v) => v,
+            None => None,
+        };
+        // The buffered element is the last one only if `iter` is empty.
+        self.iter.last().or(peek_opt)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let peek_len = match self.peeked {
+            Some(None) => return (0, Some(0)),
+            Some(Some(_)) => 1,
+            None => 0,
+        };
+        let (lo, hi) = self.iter.size_hint();
+        // Account for the buffered element without overflowing.
+        let lo = lo.saturating_add(peek_len);
+        let hi = match hi {
+            Some(x) => x.checked_add(peek_len),
+            None => None,
+        };
+        (lo, hi)
+    }
+
+    #[inline]
+    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Ok = B>,
+    {
+        // Fold the buffered element first (short-circuiting via `?`), then
+        // hand the rest to the inner iterator's `try_fold`.
+        let acc = match self.peeked.take() {
+            Some(None) => return try { init },
+            Some(Some(v)) => f(init, v)?,
+            None => init,
+        };
+        self.iter.try_fold(acc, f)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let acc = match self.peeked {
+            Some(None) => return init,
+            Some(Some(v)) => fold(init, v),
+            None => init,
+        };
+        self.iter.fold(acc, fold)
+    }
+}
+
+#[stable(feature = "double_ended_peek_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for Peekable<I>
+where
+    I: DoubleEndedIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<Self::Item> {
+        match self.peeked.as_mut() {
+            // The buffered element is logically at the *front*, so it is
+            // only yielded from the back once the inner iterator is empty.
+            Some(v @ Some(_)) => self.iter.next_back().or_else(|| v.take()),
+            Some(None) => None,
+            None => self.iter.next_back(),
+        }
+    }
+
+    #[inline]
+    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Ok = B>,
+    {
+        match self.peeked.take() {
+            Some(None) => try { init },
+            // Fold the inner iterator first; the buffered front element
+            // comes last in reverse order.
+            Some(Some(v)) => match self.iter.try_rfold(init, &mut f).into_result() {
+                Ok(acc) => f(acc, v),
+                Err(e) => {
+                    // On short-circuit the buffered element was not consumed:
+                    // put it back so the iterator state stays consistent.
+                    self.peeked = Some(Some(v));
+                    Try::from_error(e)
+                }
+            },
+            None => self.iter.try_rfold(init, f),
+        }
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(self, init: Acc, mut fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        match self.peeked {
+            Some(None) => init,
+            Some(Some(v)) => {
+                let acc = self.iter.rfold(init, &mut fold);
+                fold(acc, v)
+            }
+            None => self.iter.rfold(init, fold),
+        }
+    }
+}
+
+// `size_hint` above accounts for the buffered slot, so exact-size-ness is
+// preserved through the adapter.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator> ExactSizeIterator for Peekable<I> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator> FusedIterator for Peekable<I> {}
+
+impl<I: Iterator> Peekable<I> {
+    /// Returns a reference to the next() value without advancing the iterator.
+    ///
+    /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`.
+    /// But if the iteration is over, `None` is returned.
+    ///
+    /// [`next`]: Iterator::next
+    ///
+    /// Because `peek()` returns a reference, and many iterators iterate over
+    /// references, there can be a possibly confusing situation where the
+    /// return value is a double reference. You can see this effect in the
+    /// examples below.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let xs = [1, 2, 3];
+    ///
+    /// let mut iter = xs.iter().peekable();
+    ///
+    /// // peek() lets us see into the future
+    /// assert_eq!(iter.peek(), Some(&&1));
+    /// assert_eq!(iter.next(), Some(&1));
+    ///
+    /// assert_eq!(iter.next(), Some(&2));
+    ///
+    /// // The iterator does not advance even if we `peek` multiple times
+    /// assert_eq!(iter.peek(), Some(&&3));
+    /// assert_eq!(iter.peek(), Some(&&3));
+    ///
+    /// assert_eq!(iter.next(), Some(&3));
+    ///
+    /// // After the iterator is finished, so is `peek()`
+    /// assert_eq!(iter.peek(), None);
+    /// assert_eq!(iter.next(), None);
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn peek(&mut self) -> Option<&I::Item> {
+        // Borrow `iter` separately so the closure does not capture `self`
+        // while `self.peeked` is being mutated.
+        let iter = &mut self.iter;
+        self.peeked.get_or_insert_with(|| iter.next()).as_ref()
+    }
+
+    /// Consume and return the next value of this iterator if a condition is true.
+    ///
+    /// If `func` returns `true` for the next value of this iterator, consume and return it.
+    /// Otherwise, return `None`.
+    ///
+    /// # Examples
+    /// Consume a number if it's equal to 0.
+    /// ```
+    /// #![feature(peekable_next_if)]
+    /// let mut iter = (0..5).peekable();
+    /// // The first item of the iterator is 0; consume it.
+    /// assert_eq!(iter.next_if(|&x| x == 0), Some(0));
+    /// // The next item returned is now 1, so `next_if` will return `None`.
+    /// assert_eq!(iter.next_if(|&x| x == 0), None);
+    /// // `next_if` saves the value of the next item if the predicate returned `false`.
+    /// assert_eq!(iter.next(), Some(1));
+    /// ```
+    ///
+    /// Consume any number less than 10.
+    /// ```
+    /// #![feature(peekable_next_if)]
+    /// let mut iter = (1..20).peekable();
+    /// // Consume all numbers less than 10
+    /// while iter.next_if(|&x| x < 10).is_some() {}
+    /// // The next value returned will be 10
+    /// assert_eq!(iter.next(), Some(10));
+    /// ```
+    #[unstable(feature = "peekable_next_if", issue = "72480")]
+    pub fn next_if(&mut self, func: impl FnOnce(&I::Item) -> bool) -> Option<I::Item> {
+        match self.next() {
+            Some(matched) if func(&matched) => Some(matched),
+            // Rejected (or exhausted): stash the outcome back in the peek
+            // slot so the element is not lost.
+            other => {
+                // Since we called `self.next()`, we consumed `self.peeked`.
+                assert!(self.peeked.is_none());
+                self.peeked = Some(other);
+                None
+            }
+        }
+    }
+
+    /// Consume and return the next item if it is equal to `expected`.
+    ///
+    /// # Example
+    /// Consume a number if it's equal to 0.
+    /// ```
+    /// #![feature(peekable_next_if)]
+    /// let mut iter = (0..5).peekable();
+    /// // The first item of the iterator is 0; consume it.
+    /// assert_eq!(iter.next_if_eq(&0), Some(0));
+    /// // The next item returned is now 1, so `next_if_eq` will return `None`.
+    /// assert_eq!(iter.next_if_eq(&0), None);
+    /// // `next_if_eq` saves the value of the next item if it was not equal to `expected`.
+    /// assert_eq!(iter.next(), Some(1));
+    /// ```
+    #[unstable(feature = "peekable_next_if", issue = "72480")]
+    pub fn next_if_eq<T>(&mut self, expected: &T) -> Option<I::Item>
+    where
+        T: ?Sized,
+        I::Item: PartialEq<T>,
+    {
+        self.next_if(|next| next == expected)
+    }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<I> TrustedLen for Peekable<I> where I: TrustedLen {}
+
+// NOTE(review): `as_inner` hands out the raw inner iterator and bypasses any
+// buffered `peeked` element — confirm that in-place-iteration callers account
+// for a possibly-filled peek slot.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, I: Iterator> SourceIter for Peekable<I>
+where
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Peekable<I> {}
+
+/// An iterator that rejects elements while `predicate` returns `true`.
+///
+/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`skip_while`]: Iterator::skip_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct SkipWhile<I, P> {
+    iter: I,
+    // `true` once skipping has finished; from then on every element passes
+    // through without consulting the predicate.
+    flag: bool,
+    predicate: P,
+}
+impl<I, P> SkipWhile<I, P> {
+    // Crate-internal constructor used by `Iterator::skip_while`.
+    pub(super) fn new(iter: I, predicate: P) -> SkipWhile<I, P> {
+        SkipWhile { iter, flag: false, predicate }
+    }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for SkipWhile<I, P> {
+    /// Shows the inner iterator and the `flag` state; the predicate closure
+    /// is omitted because closures do not implement `Debug`.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut builder = fmt.debug_struct("SkipWhile");
+        builder.field("iter", &self.iter);
+        builder.field("flag", &self.flag);
+        builder.finish()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, P> Iterator for SkipWhile<I, P>
+where
+    P: FnMut(&I::Item) -> bool,
+{
+    type Item = I::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<I::Item> {
+        // Search predicate for `find`: accepts everything once `flag` is
+        // set, and sets `flag` at the first element the skip-predicate
+        // rejects (that element is the first one yielded).
+        fn check<'a, T>(
+            flag: &'a mut bool,
+            pred: &'a mut impl FnMut(&T) -> bool,
+        ) -> impl FnMut(&T) -> bool + 'a {
+            move |x| {
+                if *flag || !pred(x) {
+                    *flag = true;
+                    true
+                } else {
+                    false
+                }
+            }
+        }
+
+        let flag = &mut self.flag;
+        let pred = &mut self.predicate;
+        self.iter.find(check(flag, pred))
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (_, upper) = self.iter.size_hint();
+        (0, upper) // can't know a lower bound, due to the predicate
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, mut init: Acc, mut fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Let `next` skip the prefix once; afterwards the inner iterator can
+        // be folded directly.
+        if !self.flag {
+            match self.next() {
+                Some(v) => init = fold(init, v)?,
+                None => return try { init },
+            }
+        }
+        self.iter.try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, mut init: Acc, mut fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if !self.flag {
+            match self.next() {
+                Some(v) => init = fold(init, v),
+                None => return init,
+            }
+        }
+        self.iter.fold(init, fold)
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I, P> FusedIterator for SkipWhile<I, P>
+where
+    I: FusedIterator,
+    P: FnMut(&I::Item) -> bool,
+{
+}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, P, I: Iterator> SourceIter for SkipWhile<I, P>
+where
+    P: FnMut(&I::Item) -> bool,
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+// Skipping only drops elements, so in-place collection cannot outgrow the
+// source allocation.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for SkipWhile<I, F> where
+    F: FnMut(&I::Item) -> bool
+{
+}
+
+/// An iterator that only accepts elements while `predicate` returns `true`.
+///
+/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`take_while`]: Iterator::take_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct TakeWhile<I, P> {
+    iter: I,
+    // `true` once the predicate has rejected an element; the adapter then
+    // yields `None` forever (without fusing the inner iterator).
+    flag: bool,
+    predicate: P,
+}
+impl<I, P> TakeWhile<I, P> {
+    // Crate-internal constructor used by `Iterator::take_while`.
+    pub(super) fn new(iter: I, predicate: P) -> TakeWhile<I, P> {
+        TakeWhile { iter, flag: false, predicate }
+    }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<I: fmt::Debug, P> fmt::Debug for TakeWhile<I, P> {
+    /// Shows the inner iterator and the `flag` state; the predicate closure
+    /// is omitted because closures do not implement `Debug`.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut builder = fmt.debug_struct("TakeWhile");
+        builder.field("iter", &self.iter);
+        builder.field("flag", &self.flag);
+        builder.finish()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, P> Iterator for TakeWhile<I, P>
+where
+    P: FnMut(&I::Item) -> bool,
+{
+    type Item = I::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<I::Item> {
+        if self.flag {
+            None
+        } else {
+            let x = self.iter.next()?;
+            if (self.predicate)(&x) {
+                Some(x)
+            } else {
+                // First rejection ends the iteration; the rejected element
+                // is dropped.
+                self.flag = true;
+                None
+            }
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        if self.flag {
+            (0, Some(0))
+        } else {
+            let (_, upper) = self.iter.size_hint();
+            (0, upper) // can't know a lower bound, due to the predicate
+        }
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Adapter that folds accepted elements and, on the first rejection,
+        // sets `flag` and breaks out carrying the accumulator.
+        fn check<'a, T, Acc, R: Try<Ok = Acc>>(
+            flag: &'a mut bool,
+            p: &'a mut impl FnMut(&T) -> bool,
+            mut fold: impl FnMut(Acc, T) -> R + 'a,
+        ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+            move |acc, x| {
+                if p(&x) {
+                    ControlFlow::from_try(fold(acc, x))
+                } else {
+                    *flag = true;
+                    ControlFlow::Break(try { acc })
+                }
+            }
+        }
+
+        if self.flag {
+            try { init }
+        } else {
+            let flag = &mut self.flag;
+            let p = &mut self.predicate;
+            self.iter.try_fold(init, check(flag, p, fold)).into_try()
+        }
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // Reuse `try_fold` with an infallible result type (`!` error).
+        #[inline]
+        fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+            move |acc, x| Ok(f(acc, x))
+        }
+
+        self.try_fold(init, ok(fold)).unwrap()
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I, P> FusedIterator for TakeWhile<I, P>
+where
+    I: FusedIterator,
+    P: FnMut(&I::Item) -> bool,
+{
+}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, P, I: Iterator> SourceIter for TakeWhile<I, P>
+where
+    P: FnMut(&I::Item) -> bool,
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+// Truncating only drops elements, so in-place collection cannot outgrow the
+// source allocation.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for TakeWhile<I, F> where
+    F: FnMut(&I::Item) -> bool
+{
+}
+
+/// An iterator that only accepts elements while `predicate` returns `Some(_)`.
+///
+/// This `struct` is created by the [`map_while`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`map_while`]: Iterator::map_while
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
+#[derive(Clone)]
+pub struct MapWhile<I, P> {
+    iter: I,
+    // No `flag` field: unlike `TakeWhile`, a `None` from `predicate` stops
+    // the current call but the adapter is not latched shut (see `next`).
+    predicate: P,
+}
+
+impl<I, P> MapWhile<I, P> {
+    // Crate-internal constructor used by `Iterator::map_while`.
+    pub(super) fn new(iter: I, predicate: P) -> MapWhile<I, P> {
+        MapWhile { iter, predicate }
+    }
+}
+
+#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
+impl<I: fmt::Debug, P> fmt::Debug for MapWhile<I, P> {
+    /// Shows only the inner iterator; the predicate closure is omitted
+    /// because closures do not implement `Debug`.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut builder = fmt.debug_struct("MapWhile");
+        builder.field("iter", &self.iter);
+        builder.finish()
+    }
+}
+
+#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
+impl<B, I: Iterator, P> Iterator for MapWhile<I, P>
+where
+    P: FnMut(I::Item) -> Option<B>,
+{
+    type Item = B;
+
+    #[inline]
+    fn next(&mut self) -> Option<B> {
+        // An element mapped to `None` ends this call, but nothing is
+        // latched: a later `next` polls the inner iterator again.
+        let x = self.iter.next()?;
+        (self.predicate)(x)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (_, upper) = self.iter.size_hint();
+        (0, upper) // can't know a lower bound, due to the predicate
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, mut fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Fold mapped elements until `predicate` yields `None`, then break
+        // out carrying the accumulator on the success path.
+        let Self { iter, predicate } = self;
+        iter.try_fold(init, |acc, x| match predicate(x) {
+            Some(item) => ControlFlow::from_try(fold(acc, item)),
+            None => ControlFlow::Break(try { acc }),
+        })
+        .into_try()
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // Reuse `try_fold` with an infallible result type (`!` error).
+        #[inline]
+        fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+            move |acc, x| Ok(f(acc, x))
+        }
+
+        self.try_fold(init, ok(fold)).unwrap()
+    }
+}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, B, I: Iterator, P> SourceIter for MapWhile<I, P>
+where
+    P: FnMut(I::Item) -> Option<B>,
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+// `MapWhile` yields at most one item per item consumed, so in-place
+// collection cannot outgrow the source allocation.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<B, I: InPlaceIterable, P> InPlaceIterable for MapWhile<I, P> where
+    P: FnMut(I::Item) -> Option<B>
+{
+}
+
+/// An iterator that skips over `n` elements of `iter`.
+///
+/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`skip`]: Iterator::skip
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Skip<I> {
+    iter: I,
+    // Number of elements still to be skipped; reset to 0 once skipping has
+    // been performed.
+    n: usize,
+}
+impl<I> Skip<I> {
+    // Crate-internal constructor used by `Iterator::skip`.
+    pub(super) fn new(iter: I, n: usize) -> Skip<I> {
+        Skip { iter, n }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Skip<I>
+where
+    I: Iterator,
+{
+    type Item = <I as Iterator>::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<I::Item> {
+        if self.n == 0 {
+            self.iter.next()
+        } else {
+            // `nth(old_n)` skips `old_n` elements and yields the next one,
+            // performing the whole skip in a single call.
+            let old_n = self.n;
+            self.n = 0;
+            self.iter.nth(old_n)
+        }
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        // Can't just add n + self.n due to overflow.
+        if self.n > 0 {
+            let to_skip = self.n;
+            self.n = 0;
+            // nth(n) skips n+1
+            self.iter.nth(to_skip - 1)?;
+        }
+        self.iter.nth(n)
+    }
+
+    #[inline]
+    fn count(mut self) -> usize {
+        if self.n > 0 {
+            // nth(n) skips n+1
+            if self.iter.nth(self.n - 1).is_none() {
+                return 0;
+            }
+        }
+        self.iter.count()
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<I::Item> {
+        if self.n > 0 {
+            // nth(n) skips n+1
+            self.iter.nth(self.n - 1)?;
+        }
+        self.iter.last()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (lower, upper) = self.iter.size_hint();
+
+        // Saturate: skipping more than is available yields zero elements.
+        let lower = lower.saturating_sub(self.n);
+        let upper = match upper {
+            Some(x) => Some(x.saturating_sub(self.n)),
+            None => None,
+        };
+
+        (lower, upper)
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        let n = self.n;
+        self.n = 0;
+        if n > 0 {
+            // nth(n) skips n+1
+            if self.iter.nth(n - 1).is_none() {
+                return try { init };
+            }
+        }
+        self.iter.try_fold(init, fold)
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if self.n > 0 {
+            // nth(n) skips n+1
+            if self.iter.nth(self.n - 1).is_none() {
+                return init;
+            }
+        }
+        self.iter.fold(init, fold)
+    }
+}
+
+// `size_hint` above subtracts `n` exactly (saturating at zero), so the
+// adapter stays exact-size whenever the inner iterator is.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> ExactSizeIterator for Skip<I> where I: ExactSizeIterator {}
+
+// Iterating from the back needs `ExactSizeIterator`: the adapter must know
+// how many yieldable elements remain so the skipped prefix is never yielded.
+#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")]
+impl<I> DoubleEndedIterator for Skip<I>
+where
+    I: DoubleEndedIterator + ExactSizeIterator,
+{
+    fn next_back(&mut self) -> Option<Self::Item> {
+        // `self.len()` already accounts for the pending skip via `size_hint`.
+        if self.len() > 0 { self.iter.next_back() } else { None }
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+        let len = self.len();
+        if n < len {
+            self.iter.nth_back(n)
+        } else {
+            if len > 0 {
+                // consume the original iterator
+                self.iter.nth_back(len - 1);
+            }
+            None
+        }
+    }
+
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Counts down from the number of yieldable elements and breaks once
+        // the skipped prefix would be reached.
+        fn check<T, Acc, R: Try<Ok = Acc>>(
+            mut n: usize,
+            mut fold: impl FnMut(Acc, T) -> R,
+        ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> {
+            move |acc, x| {
+                n -= 1;
+                let r = fold(acc, x);
+                if n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
+            }
+        }
+
+        let n = self.len();
+        if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() }
+    }
+
+    fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // Reuse `try_rfold` with an infallible result type (`!` error).
+        #[inline]
+        fn ok<Acc, T>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, T) -> Result<Acc, !> {
+            move |acc, x| Ok(f(acc, x))
+        }
+
+        self.try_rfold(init, ok(fold)).unwrap()
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I> FusedIterator for Skip<I> where I: FusedIterator {}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, I: Iterator> SourceIter for Skip<I>
+where
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Skip<I> {}
+
+/// An iterator that only iterates over the first `n` iterations of `iter`.
+///
+/// This `struct` is created by the [`take`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`take`]: Iterator::take
+/// [`Iterator`]: trait.Iterator.html
+#[derive(Clone, Debug)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Take<I> {
+    pub(super) iter: I,
+    // Number of elements still allowed to be yielded.
+    pub(super) n: usize,
+}
+impl<I> Take<I> {
+    // Crate-internal constructor used by `Iterator::take`.
+    pub(super) fn new(iter: I, n: usize) -> Take<I> {
+        Take { iter, n }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> Iterator for Take<I>
+where
+    I: Iterator,
+{
+    type Item = <I as Iterator>::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<<I as Iterator>::Item> {
+        if self.n != 0 {
+            self.n -= 1;
+            self.iter.next()
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    fn nth(&mut self, n: usize) -> Option<I::Item> {
+        if self.n > n {
+            // `nth(n)` consumes n+1 elements of the inner iterator.
+            self.n -= n + 1;
+            self.iter.nth(n)
+        } else {
+            // Request exceeds the quota: drain what remains, then report
+            // exhaustion.
+            if self.n > 0 {
+                self.iter.nth(self.n - 1);
+                self.n = 0;
+            }
+            None
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        if self.n == 0 {
+            return (0, Some(0));
+        }
+
+        let (lower, upper) = self.iter.size_hint();
+
+        // Both bounds are clamped to the remaining quota `n`.
+        let lower = cmp::min(lower, self.n);
+
+        let upper = match upper {
+            Some(x) if x < self.n => Some(x),
+            _ => Some(self.n),
+        };
+
+        (lower, upper)
+    }
+
+    #[inline]
+    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        // Counts the quota down through a borrow so `self.n` stays accurate
+        // even when the fold short-circuits.
+        fn check<'a, T, Acc, R: Try<Ok = Acc>>(
+            n: &'a mut usize,
+            mut fold: impl FnMut(Acc, T) -> R + 'a,
+        ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+            move |acc, x| {
+                *n -= 1;
+                let r = fold(acc, x);
+                if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
+            }
+        }
+
+        if self.n == 0 {
+            try { init }
+        } else {
+            let n = &mut self.n;
+            self.iter.try_fold(init, check(n, fold)).into_try()
+        }
+    }
+
+    #[inline]
+    fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        // Reuse `try_fold` with an infallible result type (`!` error).
+        #[inline]
+        fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+            move |acc, x| Ok(f(acc, x))
+        }
+
+        self.try_fold(init, ok(fold)).unwrap()
+    }
+}
+
+// Forwards access to the innermost source iterator for in-place iteration.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S: Iterator, I: Iterator> SourceIter for Take<I>
+where
+    I: SourceIter<Source = S>,
+{
+    type Source = S;
+
+    #[inline]
+    unsafe fn as_inner(&mut self) -> &mut S {
+        // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+        unsafe { SourceIter::as_inner(&mut self.iter) }
+    }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<I: InPlaceIterable> InPlaceIterable for Take<I> {}
+
+// Iterating from the back needs `ExactSizeIterator`: only the first `n`
+// elements may be yielded, so the adapter must locate the cut-off from the
+// inner iterator's length.
+#[stable(feature = "double_ended_take_iterator", since = "1.38.0")]
+impl<I> DoubleEndedIterator for Take<I>
+where
+    I: DoubleEndedIterator + ExactSizeIterator,
+{
+    #[inline]
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.n == 0 {
+            None
+        } else {
+            let n = self.n;
+            self.n -= 1;
+            // Skip past the tail beyond the quota and yield element `n - 1`
+            // (counting from the front); saturates when the inner iterator
+            // is shorter than the quota.
+            self.iter.nth_back(self.iter.len().saturating_sub(n))
+        }
+    }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+        let len = self.iter.len();
+        if self.n > n {
+            // Translate the index to the inner iterator, accounting for the
+            // tail beyond the quota.
+            let m = len.saturating_sub(self.n) + n;
+            self.n -= n + 1;
+            self.iter.nth_back(m)
+        } else {
+            if len > 0 {
+                self.iter.nth_back(len - 1);
+            }
+            None
+        }
+    }
+
+    #[inline]
+    fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> R,
+        R: Try<Ok = Acc>,
+    {
+        if self.n == 0 {
+            try { init }
+        } else {
+            let len = self.iter.len();
+            // Discard the tail beyond the quota first, then the remaining
+            // elements can be folded in reverse directly.
+            if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() {
+                try { init }
+            } else {
+                self.iter.try_rfold(init, fold)
+            }
+        }
+    }
+
+    #[inline]
+    fn rfold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+    where
+        Self: Sized,
+        Fold: FnMut(Acc, Self::Item) -> Acc,
+    {
+        if self.n == 0 {
+            init
+        } else {
+            let len = self.iter.len();
+            if len > self.n && self.iter.nth_back(len - self.n - 1).is_none() {
+                init
+            } else {
+                self.iter.rfold(init, fold)
+            }
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Length of a `Take` is computable from `size_hint`, which is exact here.
+impl<I> ExactSizeIterator for Take<I> where I: ExactSizeIterator {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// Once `n` reaches 0 this adapter keeps returning `None`.
+impl<I> FusedIterator for Take<I> where I: FusedIterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+// SAFETY-relevant marker: the reported length stays trustworthy because
+// `size_hint` only tightens the inner (trusted) bounds.
+unsafe impl<I: TrustedLen> TrustedLen for Take<I> {}
+
+/// An iterator to maintain state while iterating another iterator.
+///
+/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`scan`]: Iterator::scan
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Scan<I, St, F> {
+ // Underlying iterator supplying the raw items.
+ iter: I,
+ // Closure `FnMut(&mut St, I::Item) -> Option<B>`; returning `None` ends
+ // iteration early.
+ f: F,
+ // Mutable state threaded through every call to `f`.
+ state: St,
+}
+impl<I, St, F> Scan<I, St, F> {
+ pub(super) fn new(iter: I, state: St, f: F) -> Scan<I, St, F> {
+ Scan { iter, state, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+// The closure `f` carries no `Debug` bound, so it is omitted from the output.
+impl<I: fmt::Debug, St: fmt::Debug, F> fmt::Debug for Scan<I, St, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Scan").field("iter", &self.iter).field("state", &self.state).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B, I, St, F> Iterator for Scan<I, St, F>
+where
+ I: Iterator,
+ F: FnMut(&mut St, I::Item) -> Option<B>,
+{
+ type Item = B;
+
+ #[inline]
+ fn next(&mut self) -> Option<B> {
+ // Stop when the inner iterator ends, or when `f` returns `None`.
+ let a = self.iter.next()?;
+ (self.f)(&mut self.state, a)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.iter.size_hint();
+ (0, upper) // can't know a lower bound, due to the scan function
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Ok = Acc>,
+ {
+ // Adapter closure: apply `f` first; a `None` from `f` terminates the
+ // scan successfully with the current accumulator, while the result of
+ // `fold` (success or short-circuit) is forwarded via `from_try`.
+ fn scan<'a, T, St, B, Acc, R: Try<Ok = Acc>>(
+ state: &'a mut St,
+ f: &'a mut impl FnMut(&mut St, T) -> Option<B>,
+ mut fold: impl FnMut(Acc, B) -> R + 'a,
+ ) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
+ move |acc, x| match f(state, x) {
+ None => ControlFlow::Break(try { acc }),
+ Some(x) => ControlFlow::from_try(fold(acc, x)),
+ }
+ }
+
+ let state = &mut self.state;
+ let f = &mut self.f;
+ self.iter.try_fold(init, scan(state, f, fold)).into_try()
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ // Lift the infallible `fold` into `Result<_, !>` so the specialized
+ // `try_fold` can be reused; `unwrap` cannot panic on `!`.
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// `Scan` exposes its inner iterator's source unchanged.
+unsafe impl<St, F, B, S: Iterator, I: Iterator> SourceIter for Scan<I, St, F>
+where
+ I: SourceIter<Source = S>,
+ F: FnMut(&mut St, I::Item) -> Option<B>,
+{
+ type Source = S;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut S {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// NOTE(review): presumably sound because `Scan` consumes items strictly in
+// order — confirm against `InPlaceIterable`'s contract (not visible here).
+unsafe impl<St, F, B, I: InPlaceIterable> InPlaceIterable for Scan<I, St, F> where
+ F: FnMut(&mut St, I::Item) -> Option<B>
+{
+}
+
+/// An iterator that calls a function with a reference to each element before
+/// yielding it.
+///
+/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its
+/// documentation for more.
+///
+/// [`inspect`]: Iterator::inspect
+/// [`Iterator`]: trait.Iterator.html
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct Inspect<I, F> {
+ // Underlying iterator.
+ iter: I,
+ // Inspection closure `FnMut(&I::Item)`, called on each yielded element.
+ f: F,
+}
+impl<I, F> Inspect<I, F> {
+ pub(super) fn new(iter: I, f: F) -> Inspect<I, F> {
+ Inspect { iter, f }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+// The closure `f` carries no `Debug` bound, so only `iter` is shown.
+impl<I: fmt::Debug, F> fmt::Debug for Inspect<I, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Inspect").field("iter", &self.iter).finish()
+ }
+}
+
+impl<I: Iterator, F> Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ /// Shows `elt` to the inspection closure (by reference, when present)
+ /// and passes it through unchanged.
+ #[inline]
+ fn do_inspect(&mut self, elt: Option<I::Item>) -> Option<I::Item> {
+ elt.map(|item| {
+ (self.f)(&item);
+ item
+ })
+ }
+}
+
+/// Adapts a plain `fold` closure so that each item is first shown to
+/// `inspector` (by reference) before being folded into the accumulator.
+fn inspect_fold<T, Acc>(
+ mut inspector: impl FnMut(&T),
+ mut folder: impl FnMut(Acc, T) -> Acc,
+) -> impl FnMut(Acc, T) -> Acc {
+ move |acc, item| {
+ inspector(&item);
+ folder(acc, item)
+ }
+}
+
+/// Adapts a fallible `fold` closure so that each item is first shown to the
+/// borrowed `inspector` before being folded; the fold result is forwarded.
+fn inspect_try_fold<'a, T, Acc, R>(
+ inspector: &'a mut impl FnMut(&T),
+ mut folder: impl FnMut(Acc, T) -> R + 'a,
+) -> impl FnMut(Acc, T) -> R + 'a {
+ move |acc, item| {
+ inspector(&item);
+ folder(acc, item)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, F> Iterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ type Item = I::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<I::Item> {
+ let next = self.iter.next();
+ self.do_inspect(next)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Inspection never adds or drops elements.
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Ok = Acc>,
+ {
+ // Delegate so inner iterators keep their specialized `try_fold`.
+ self.iter.try_fold(init, inspect_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.fold(init, inspect_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Back-to-front iteration inspects elements in the same way as forward
+// iteration; only the traversal order differs.
+impl<I: DoubleEndedIterator, F> DoubleEndedIterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<I::Item> {
+ let next = self.iter.next_back();
+ self.do_inspect(next)
+ }
+
+ #[inline]
+ fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
+ where
+ Self: Sized,
+ Fold: FnMut(Acc, Self::Item) -> R,
+ R: Try<Ok = Acc>,
+ {
+ self.iter.try_rfold(init, inspect_try_fold(&mut self.f, fold))
+ }
+
+ #[inline]
+ fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
+ where
+ Fold: FnMut(Acc, Self::Item) -> Acc,
+ {
+ self.iter.rfold(init, inspect_fold(self.f, fold))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Inspection is length-preserving, so both queries forward directly.
+impl<I: ExactSizeIterator, F> ExactSizeIterator for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+{
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// Fused-ness carries over: `Inspect` only forwards `next`.
+impl<I: FusedIterator, F> FusedIterator for Inspect<I, F> where F: FnMut(&I::Item) {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// `Inspect` exposes its inner iterator's source unchanged.
+unsafe impl<S: Iterator, I: Iterator, F> SourceIter for Inspect<I, F>
+where
+ F: FnMut(&I::Item),
+ I: SourceIter<Source = S>,
+{
+ type Source = S;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut S {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.iter) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// NOTE(review): presumably sound because inspection only observes items by
+// reference — confirm against `InPlaceIterable`'s contract (not visible here).
+unsafe impl<I: InPlaceIterable, F> InPlaceIterable for Inspect<I, F> where F: FnMut(&I::Item) {}
+
+/// An iterator adapter that produces output as long as the underlying
+/// iterator produces `Result::Ok` values.
+///
+/// If an error is encountered, the iterator stops and the error is
+/// stored.
+pub(crate) struct ResultShunt<'a, I, E> {
+ // Underlying iterator of `Result<T, E>` items.
+ iter: I,
+ // Out-parameter owned by the caller (`process_results`); set to `Err(e)`
+ // on the first error, which also ends iteration.
+ error: &'a mut Result<(), E>,
+}
+
+/// Process the given iterator as if it yielded a `T` instead of a
+/// `Result<T, _>`. Any errors will stop the inner iterator and
+/// the overall result will be an error.
+pub(crate) fn process_results<I, T, E, F, U>(iter: I, mut f: F) -> Result<U, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+ for<'a> F: FnMut(ResultShunt<'a, I, E>) -> U,
+{
+ // The shunt records the first error through this borrowed slot while `f`
+ // consumes the shunt; afterwards the slot decides success or failure.
+ let mut error = Ok(());
+ let shunt = ResultShunt { iter, error: &mut error };
+ let value = f(shunt);
+ error.map(|()| value)
+}
+
+impl<I, T, E> Iterator for ResultShunt<'_, I, E>
+where
+ I: Iterator<Item = Result<T, E>>,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Route through `find` (and thus `try_fold`) so the error-capturing
+ // logic below is the single place that unwraps `Result` items.
+ self.find(|_| true)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.error.is_err() {
+ // Once an error has been recorded, nothing more is yielded.
+ (0, Some(0))
+ } else {
+ // Any remaining item could be an `Err`, so no lower bound.
+ let (_, upper) = self.iter.size_hint();
+ (0, upper)
+ }
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Ok = B>,
+ {
+ // On the first `Err`, store it for `process_results` and break out of
+ // the inner fold with the accumulator unchanged (a "successful" stop).
+ let error = &mut *self.error;
+ self.iter
+ .try_fold(init, |acc, x| match x {
+ Ok(x) => ControlFlow::from_try(f(acc, x)),
+ Err(e) => {
+ *error = Err(e);
+ ControlFlow::Break(try { acc })
+ }
+ })
+ .into_try()
+ }
+
+ fn fold<B, F>(mut self, init: B, fold: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ // Lift the infallible `fold` into `Result<_, !>` to reuse `try_fold`;
+ // `unwrap` cannot panic on `!`.
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ self.try_fold(init, ok(fold)).unwrap()
+ }
+}
--- /dev/null
+use crate::cmp;
+use crate::fmt::{self, Debug};
+
+use super::super::{
+ DoubleEndedIterator, ExactSizeIterator, FusedIterator, InPlaceIterable, Iterator, SourceIter,
+ TrustedLen,
+};
+
+/// An iterator that iterates two other iterators simultaneously.
+///
+/// This `struct` is created by [`Iterator::zip`]. See its documentation
+/// for more.
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Zip<A, B> {
+ a: A,
+ b: B,
+ // index and len are only used by the specialized version of zip
+ // (the `TrustedRandomAccess` impl of `ZipImpl` below): `index` is the
+ // next front position, `len` the back boundary.
+ index: usize,
+ len: usize,
+}
+impl<A: Iterator, B: Iterator> Zip<A, B> {
+ pub(in super::super) fn new(a: A, b: B) -> Zip<A, B> {
+ // Dispatches to the general or the `TrustedRandomAccess` constructor.
+ ZipImpl::new(a, b)
+ }
+ /// Fallback `nth` written in terms of `next`, shared by both `ZipImpl`
+ /// variants.
+ fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> {
+ loop {
+ let pair = Iterator::next(self)?;
+ if n == 0 {
+ return Some(pair);
+ }
+ n -= 1;
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Thin forwarding layer: all real logic lives in `ZipImpl`, which is
+// specialized for `TrustedRandomAccess` iterators.
+impl<A, B> Iterator for Zip<A, B>
+where
+ A: Iterator,
+ B: Iterator,
+{
+ type Item = (A::Item, B::Item);
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ ZipImpl::next(self)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ ZipImpl::size_hint(self)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ ZipImpl::nth(self, n)
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccess,
+ {
+ // SAFETY: `ZipImpl::__iterator_get_unchecked` has same safety
+ // requirements as `Iterator::__iterator_get_unchecked`.
+ unsafe { ZipImpl::get_unchecked(self, idx) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// `ExactSizeIterator` is required so the longer side can be trimmed before
+// pairing elements from the back (see `ZipImpl::next_back`).
+impl<A, B> DoubleEndedIterator for Zip<A, B>
+where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<(A::Item, B::Item)> {
+ ZipImpl::next_back(self)
+ }
+}
+
+// Zip specialization trait: one blanket (generic) impl plus a specialized
+// impl for `TrustedRandomAccess` sources, selected by the compiler.
+#[doc(hidden)]
+trait ZipImpl<A, B> {
+ type Item;
+ fn new(a: A, b: B) -> Self;
+ fn next(&mut self) -> Option<Self::Item>;
+ fn size_hint(&self) -> (usize, Option<usize>);
+ fn nth(&mut self, n: usize) -> Option<Self::Item>;
+ fn next_back(&mut self) -> Option<Self::Item>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator;
+ // This has the same safety requirements as `Iterator::__iterator_get_unchecked`
+ unsafe fn get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item
+ where
+ Self: Iterator + TrustedRandomAccess;
+}
+
+// General Zip impl
+#[doc(hidden)]
+impl<A, B> ZipImpl<A, B> for Zip<A, B>
+where
+ A: Iterator,
+ B: Iterator,
+{
+ type Item = (A::Item, B::Item);
+ default fn new(a: A, b: B) -> Self {
+ Zip {
+ a,
+ b,
+ index: 0, // unused
+ len: 0, // unused
+ }
+ }
+
+ #[inline]
+ default fn next(&mut self) -> Option<(A::Item, B::Item)> {
+ // Note the asymmetry: `b` is not advanced at all once `a` is exhausted.
+ let x = self.a.next()?;
+ let y = self.b.next()?;
+ Some((x, y))
+ }
+
+ #[inline]
+ default fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.super_nth(n)
+ }
+
+ #[inline]
+ default fn next_back(&mut self) -> Option<(A::Item, B::Item)>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+ {
+ let a_sz = self.a.len();
+ let b_sz = self.b.len();
+ if a_sz != b_sz {
+ // Adjust a, b to equal length: the surplus tail of the longer side
+ // can never be paired, so it is consumed from the back first.
+ if a_sz > b_sz {
+ for _ in 0..a_sz - b_sz {
+ self.a.next_back();
+ }
+ } else {
+ for _ in 0..b_sz - a_sz {
+ self.b.next_back();
+ }
+ }
+ }
+ match (self.a.next_back(), self.b.next_back()) {
+ (Some(x), Some(y)) => Some((x, y)),
+ (None, None) => None,
+ // After trimming, both sides report equal `len`, so one side ending
+ // before the other would violate `ExactSizeIterator`'s contract.
+ _ => unreachable!(),
+ }
+ }
+
+ #[inline]
+ default fn size_hint(&self) -> (usize, Option<usize>) {
+ // Zip ends with the shorter side: take the minimum of each bound.
+ let (a_lower, a_upper) = self.a.size_hint();
+ let (b_lower, b_upper) = self.b.size_hint();
+
+ let lower = cmp::min(a_lower, b_lower);
+
+ let upper = match (a_upper, b_upper) {
+ (Some(x), Some(y)) => Some(cmp::min(x, y)),
+ (Some(x), None) => Some(x),
+ (None, Some(y)) => Some(y),
+ (None, None) => None,
+ };
+
+ (lower, upper)
+ }
+
+ default unsafe fn get_unchecked(&mut self, _idx: usize) -> <Self as Iterator>::Item
+ where
+ Self: TrustedRandomAccess,
+ {
+ // Only reachable when `Self: TrustedRandomAccess`, in which case the
+ // specialized `ZipImpl` below is selected instead.
+ unreachable!("Always specialized");
+ }
+}
+
+#[doc(hidden)]
+// Specialized impl: both sides support unchecked indexed access, so zipping
+// becomes cursor arithmetic over `index..len` with `__iterator_get_unchecked`.
+impl<A, B> ZipImpl<A, B> for Zip<A, B>
+where
+ A: TrustedRandomAccess + Iterator,
+ B: TrustedRandomAccess + Iterator,
+{
+ fn new(a: A, b: B) -> Self {
+ // Cache the number of producible pairs up front; `TrustedRandomAccess`
+ // guarantees `size` is exact and cheap.
+ let len = cmp::min(a.size(), b.size());
+ Zip { a, b, index: 0, len }
+ }
+
+ #[inline]
+ fn next(&mut self) -> Option<(A::Item, B::Item)> {
+ if self.index < self.len {
+ let i = self.index;
+ self.index += 1;
+ // SAFETY: `i` is smaller than `self.len`, thus smaller than `self.a.len()` and `self.b.len()`
+ unsafe {
+ Some((self.a.__iterator_get_unchecked(i), self.b.__iterator_get_unchecked(i)))
+ }
+ } else if A::may_have_side_effect() && self.index < self.a.size() {
+ // match the base implementation's potential side effects
+ // (the general `next` polls `a` once more after pairs run out)
+ // SAFETY: we just checked that `self.index` < `self.a.len()`
+ unsafe {
+ self.a.__iterator_get_unchecked(self.index);
+ }
+ self.index += 1;
+ None
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Exact by construction: `len - index` pairs remain.
+ let len = self.len - self.index;
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ // Step the cursor over up to `n` pairs, performing the skipped
+ // element accesses only when they are observable as side effects.
+ let delta = cmp::min(n, self.len - self.index);
+ let end = self.index + delta;
+ while self.index < end {
+ let i = self.index;
+ self.index += 1;
+ if A::may_have_side_effect() {
+ // SAFETY: the usage of `cmp::min` to calculate `delta`
+ // ensures that `end` is smaller than or equal to `self.len`,
+ // so `i` is also smaller than `self.len`.
+ unsafe {
+ self.a.__iterator_get_unchecked(i);
+ }
+ }
+ if B::may_have_side_effect() {
+ // SAFETY: same as above.
+ unsafe {
+ self.b.__iterator_get_unchecked(i);
+ }
+ }
+ }
+
+ // Any remainder past the cached `len` is handled by the generic path.
+ self.super_nth(n - delta)
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<(A::Item, B::Item)>
+ where
+ A: DoubleEndedIterator + ExactSizeIterator,
+ B: DoubleEndedIterator + ExactSizeIterator,
+ {
+ let a_side_effect = A::may_have_side_effect();
+ let b_side_effect = B::may_have_side_effect();
+ if a_side_effect || b_side_effect {
+ let sz_a = self.a.size();
+ let sz_b = self.b.size();
+ // Adjust a, b to equal length, make sure that only the first call
+ // of `next_back` does this, otherwise we will break the restriction
+ // on calls to `self.next_back()` after calling `get_unchecked()`.
+ if sz_a != sz_b {
+ let sz_a = self.a.size();
+ if a_side_effect && sz_a > self.len {
+ for _ in 0..sz_a - cmp::max(self.len, self.index) {
+ self.a.next_back();
+ }
+ }
+ let sz_b = self.b.size();
+ if b_side_effect && sz_b > self.len {
+ for _ in 0..sz_b - self.len {
+ self.b.next_back();
+ }
+ }
+ }
+ }
+ if self.index < self.len {
+ // Consume from the back by shrinking `len`; `index` is untouched.
+ self.len -= 1;
+ let i = self.len;
+ // SAFETY: `i` is smaller than the previous value of `self.len`,
+ // which is also smaller than or equal to `self.a.len()` and `self.b.len()`
+ unsafe {
+ Some((self.a.__iterator_get_unchecked(i), self.b.__iterator_get_unchecked(i)))
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ unsafe fn get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { (self.a.__iterator_get_unchecked(idx), self.b.__iterator_get_unchecked(idx)) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// `size_hint` is exact when both sides are exact (minimum of two exact sizes).
+impl<A, B> ExactSizeIterator for Zip<A, B>
+where
+ A: ExactSizeIterator,
+ B: ExactSizeIterator,
+{
+}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<A, B> TrustedRandomAccess for Zip<A, B>
+where
+ A: TrustedRandomAccess,
+ B: TrustedRandomAccess,
+{
+ fn may_have_side_effect() -> bool {
+ // A zipped access touches both sides, so either side's effects count.
+ A::may_have_side_effect() || B::may_have_side_effect()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// Both paths of `next` keep returning `None` once either fused side ends.
+impl<A, B> FusedIterator for Zip<A, B>
+where
+ A: FusedIterator,
+ B: FusedIterator,
+{
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+// SAFETY-relevant marker: `size_hint` combines two trusted hints with `min`,
+// which preserves their guarantees.
+unsafe impl<A, B> TrustedLen for Zip<A, B>
+where
+ A: TrustedLen,
+ B: TrustedLen,
+{
+}
+
+// Arbitrarily selects the left side of the zip iteration as extractable "source"
+// it would require negative trait bounds to be able to try both
+#[unstable(issue = "none", feature = "inplace_iteration")]
+unsafe impl<S, A, B> SourceIter for Zip<A, B>
+where
+ A: SourceIter<Source = S>,
+ B: Iterator,
+ S: Iterator,
+{
+ type Source = S;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut S {
+ // SAFETY: unsafe function forwarding to unsafe function with the same requirements
+ unsafe { SourceIter::as_inner(&mut self.a) }
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+// Limited to Item: Copy since interaction between Zip's use of TrustedRandomAccess
+// and Drop implementation of the source is unclear.
+//
+// An additional method returning the number of times the source has been logically advanced
+// (without calling next()) would be needed to properly drop the remainder of the source.
+unsafe impl<A: InPlaceIterable, B: Iterator> InPlaceIterable for Zip<A, B> where A::Item: Copy {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// Delegates to the `ZipFmt` helper trait so the `TrustedRandomAccess` case
+// can be specialized (see below).
+impl<A: Debug, B: Debug> Debug for Zip<A, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ZipFmt::fmt(self, f)
+ }
+}
+
+// Private specialization hook for `Debug for Zip`.
+trait ZipFmt<A, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result;
+}
+
+impl<A: Debug, B: Debug> ZipFmt<A, B> for Zip<A, B> {
+ // General case: both halves can be printed safely.
+ default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Zip").field("a", &self.a).field("b", &self.b).finish()
+ }
+}
+
+impl<A: Debug + TrustedRandomAccess, B: Debug + TrustedRandomAccess> ZipFmt<A, B> for Zip<A, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // It's *not safe* to call fmt on the contained iterators, since once
+ // we start iterating they're in strange, potentially unsafe, states.
+ f.debug_struct("Zip").finish()
+ }
+}
+
+/// An iterator whose items are random-accessible efficiently
+///
+/// # Safety
+///
+/// The iterator's `size_hint` must be exact and cheap to call.
+///
+/// `size` may not be overridden.
+///
+/// `<Self as Iterator>::__iterator_get_unchecked` must be safe to call
+/// provided the following conditions are met.
+///
+/// 1. `0 <= idx` and `idx < self.size()`.
+/// 2. If `self: !Clone`, then `get_unchecked` is never called with the same
+/// index on `self` more than once.
+/// 3. After `self.get_unchecked(idx)` has been called then `next_back` will
+/// only be called at most `self.size() - idx - 1` times.
+/// 4. After `get_unchecked` is called, then only the following methods will be
+/// called on `self`:
+/// * `std::clone::Clone::clone`
+/// * `std::iter::Iterator::size_hint()`
+/// * `std::iter::Iterator::next_back()`
+/// * `std::iter::Iterator::__iterator_get_unchecked()`
+/// * `std::iter::TrustedRandomAccess::size()`
+///
+/// Further, given that these conditions are met, it must guarantee that:
+///
+/// * It does not change the value returned from `size_hint`
+/// * It must be safe to call the methods listed above on `self` after calling
+/// `get_unchecked`, assuming that the required traits are implemented.
+/// * It must also be safe to drop `self` after calling `get_unchecked`.
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+#[rustc_specialization_trait]
+pub unsafe trait TrustedRandomAccess: Sized {
+ // Convenience method.
+ // Valid because `size_hint` is required (above) to be exact, so its lower
+ // bound equals the true length.
+ fn size(&self) -> usize
+ where
+ Self: Iterator,
+ {
+ self.size_hint().0
+ }
+ /// Returns `true` if getting an iterator element may have
+ /// side effects. Remember to take inner iterators into account.
+ fn may_have_side_effect() -> bool;
+}
+
+/// Like `Iterator::__iterator_get_unchecked`, but doesn't require the compiler to
+/// know that `U: TrustedRandomAccess`.
+///
+/// ## Safety
+///
+/// Same requirements as calling `get_unchecked` directly.
+#[doc(hidden)]
+pub(in crate::iter::adapters) unsafe fn try_get_unchecked<I>(it: &mut I, idx: usize) -> I::Item
+where
+ I: Iterator,
+{
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { it.try_get_unchecked(idx) }
+}
+
+// Specialization helper: lets generic code attempt an unchecked access and
+// fall back to a panic for iterators that are not `TrustedRandomAccess`.
+unsafe trait SpecTrustedRandomAccess: Iterator {
+ /// If `Self: TrustedRandomAccess`, it must be safe to call
+ /// `Iterator::__iterator_get_unchecked(self, index)`.
+ unsafe fn try_get_unchecked(&mut self, index: usize) -> Self::Item;
+}
+
+unsafe impl<I: Iterator> SpecTrustedRandomAccess for I {
+ // Fallback for iterators without `TrustedRandomAccess`: reaching this is a
+ // logic error in the caller, never a memory-safety issue.
+ default unsafe fn try_get_unchecked(&mut self, _: usize) -> Self::Item {
+ panic!("Should only be called on TrustedRandomAccess iterators");
+ }
+}
+
+unsafe impl<I: Iterator + TrustedRandomAccess> SpecTrustedRandomAccess for I {
+ unsafe fn try_get_unchecked(&mut self, index: usize) -> Self::Item {
+ // SAFETY: the caller must uphold the contract for
+ // `Iterator::__iterator_get_unchecked`.
+ unsafe { self.__iterator_get_unchecked(index) }
+ }
+}
--- /dev/null
+//! Composable external iteration.
+//!
+//! If you've found yourself with a collection of some kind, and needed to
+//! perform an operation on the elements of said collection, you'll quickly run
+//! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so
+//! it's worth becoming familiar with them.
+//!
+//! Before explaining more, let's talk about how this module is structured:
+//!
+//! # Organization
+//!
+//! This module is largely organized by type:
+//!
+//! * [Traits] are the core portion: these traits define what kind of iterators
+//! exist and what you can do with them. The methods of these traits are worth
+//! putting some extra study time into.
+//! * [Functions] provide some helpful ways to create some basic iterators.
+//! * [Structs] are often the return types of the various methods on this
+//! module's traits. You'll usually want to look at the method that creates
+//! the `struct`, rather than the `struct` itself. For more detail about why,
+//! see '[Implementing Iterator](#implementing-iterator)'.
+//!
+//! [Traits]: #traits
+//! [Functions]: #functions
+//! [Structs]: #structs
+//!
+//! That's it! Let's dig into iterators.
+//!
+//! # Iterator
+//!
+//! The heart and soul of this module is the [`Iterator`] trait. The core of
+//! [`Iterator`] looks like this:
+//!
+//! ```
+//! trait Iterator {
+//! type Item;
+//! fn next(&mut self) -> Option<Self::Item>;
+//! }
+//! ```
+//!
+//! An iterator has a method, [`next`], which when called, returns an
+//! [`Option`]`<Item>`. [`next`] will return [`Some(Item)`] as long as there
+//! are elements, and once they've all been exhausted, will return `None` to
+//! indicate that iteration is finished. Individual iterators may choose to
+//! resume iteration, and so calling [`next`] again may or may not eventually
+//! start returning [`Some(Item)`] again at some point (for example, see [`TryIter`]).
+//!
+//! [`Iterator`]'s full definition includes a number of other methods as well,
+//! but they are default methods, built on top of [`next`], and so you get
+//! them for free.
+//!
+//! Iterators are also composable, and it's common to chain them together to do
+//! more complex forms of processing. See the [Adapters](#adapters) section
+//! below for more details.
+//!
+//! [`Some(Item)`]: Some
+//! [`next`]: Iterator::next
+//! [`TryIter`]: ../../std/sync/mpsc/struct.TryIter.html
+//!
+//! # The three forms of iteration
+//!
+//! There are three common methods which can create iterators from a collection:
+//!
+//! * `iter()`, which iterates over `&T`.
+//! * `iter_mut()`, which iterates over `&mut T`.
+//! * `into_iter()`, which iterates over `T`.
+//!
+//! Various things in the standard library may implement one or more of the
+//! three, where appropriate.
+//!
+//! # Implementing Iterator
+//!
+//! Creating an iterator of your own involves two steps: creating a `struct` to
+//! hold the iterator's state, and then implementing [`Iterator`] for that `struct`.
+//! This is why there are so many `struct`s in this module: there is one for
+//! each iterator and iterator adapter.
+//!
+//! Let's make an iterator named `Counter` which counts from `1` to `5`:
+//!
+//! ```
+//! // First, the struct:
+//!
+//! /// An iterator which counts from one to five
+//! struct Counter {
+//! count: usize,
+//! }
+//!
+//! // we want our count to start at one, so let's add a new() method to help.
+//! // This isn't strictly necessary, but is convenient. Note that we start
+//! // `count` at zero, we'll see why in `next()`'s implementation below.
+//! impl Counter {
+//! fn new() -> Counter {
+//! Counter { count: 0 }
+//! }
+//! }
+//!
+//! // Then, we implement `Iterator` for our `Counter`:
+//!
+//! impl Iterator for Counter {
+//! // we will be counting with usize
+//! type Item = usize;
+//!
+//! // next() is the only required method
+//! fn next(&mut self) -> Option<Self::Item> {
+//! // Increment our count. This is why we started at zero.
+//! self.count += 1;
+//!
+//! // Check to see if we've finished counting or not.
+//! if self.count < 6 {
+//! Some(self.count)
+//! } else {
+//! None
+//! }
+//! }
+//! }
+//!
+//! // And now we can use it!
+//!
+//! let mut counter = Counter::new();
+//!
+//! assert_eq!(counter.next(), Some(1));
+//! assert_eq!(counter.next(), Some(2));
+//! assert_eq!(counter.next(), Some(3));
+//! assert_eq!(counter.next(), Some(4));
+//! assert_eq!(counter.next(), Some(5));
+//! assert_eq!(counter.next(), None);
+//! ```
+//!
+//! Calling [`next`] this way gets repetitive. Rust has a construct which can
+//! call [`next`] on your iterator, until it reaches `None`. Let's go over that
+//! next.
+//!
+//! Also note that `Iterator` provides a default implementation of methods such as `nth` and `fold`
+//! which call `next` internally. However, it is also possible to write a custom implementation of
+//! methods like `nth` and `fold` if an iterator can compute them more efficiently without calling
+//! `next`.
+//!
+//! # `for` loops and `IntoIterator`
+//!
+//! Rust's `for` loop syntax is actually sugar for iterators. Here's a basic
+//! example of `for`:
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//!
+//! for x in values {
+//! println!("{}", x);
+//! }
+//! ```
+//!
+//! This will print the numbers one through five, each on their own line. But
+//! you'll notice something here: we never called anything on our vector to
+//! produce an iterator. What gives?
+//!
+//! There's a trait in the standard library for converting something into an
+//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter`],
+//! which converts the thing implementing [`IntoIterator`] into an iterator.
+//! Let's take a look at that `for` loop again, and what the compiler converts
+//! it into:
+//!
+//! [`into_iter`]: IntoIterator::into_iter
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//!
+//! for x in values {
+//! println!("{}", x);
+//! }
+//! ```
+//!
+//! Rust de-sugars this into:
+//!
+//! ```
+//! let values = vec![1, 2, 3, 4, 5];
+//! {
+//! let result = match IntoIterator::into_iter(values) {
+//! mut iter => loop {
+//! let next;
+//! match iter.next() {
+//! Some(val) => next = val,
+//! None => break,
+//! };
+//! let x = next;
+//! let () = { println!("{}", x); };
+//! },
+//! };
+//! result
+//! }
+//! ```
+//!
+//! First, we call `into_iter()` on the value. Then, we match on the iterator
+//! that returns, calling [`next`] over and over until we see a `None`. At
+//! that point, we `break` out of the loop, and we're done iterating.
+//!
+//! There's one more subtle bit here: the standard library contains an
+//! interesting implementation of [`IntoIterator`]:
+//!
+//! ```ignore (only-for-syntax-highlight)
+//! impl<I: Iterator> IntoIterator for I
+//! ```
+//!
+//! In other words, all [`Iterator`]s implement [`IntoIterator`], by just
+//! returning themselves. This means two things:
+//!
+//! 1. If you're writing an [`Iterator`], you can use it with a `for` loop.
+//! 2. If you're creating a collection, implementing [`IntoIterator`] for it
+//! will allow your collection to be used with the `for` loop.
+//!
+//! # Adapters
+//!
+//! Functions which take an [`Iterator`] and return another [`Iterator`] are
+//! often called 'iterator adapters', as they're a form of the 'adapter
+//! pattern'.
+//!
+//! Common iterator adapters include [`map`], [`take`], and [`filter`].
+//! For more, see their documentation.
+//!
+//! If an iterator adapter panics, the iterator will be in an unspecified (but
+//! memory safe) state. This state is also not guaranteed to stay the same
+//! across versions of Rust, so you should avoid relying on the exact values
+//! returned by an iterator which panicked.
+//!
+//! [`map`]: Iterator::map
+//! [`take`]: Iterator::take
+//! [`filter`]: Iterator::filter
+//!
+//! # Laziness
+//!
+//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that
+//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens
+//! until you call [`next`]. This is sometimes a source of confusion when
+//! creating an iterator solely for its side effects. For example, the [`map`]
+//! method calls a closure on each element it iterates over:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! let v = vec![1, 2, 3, 4, 5];
+//! v.iter().map(|x| println!("{}", x));
+//! ```
+//!
+//! This will not print any values, as we only created an iterator, rather than
+//! using it. The compiler will warn us about this kind of behavior:
+//!
+//! ```text
+//! warning: unused result that must be used: iterators are lazy and
+//! do nothing unless consumed
+//! ```
+//!
+//! The idiomatic way to write a [`map`] for its side effects is to use a
+//! `for` loop or call the [`for_each`] method:
+//!
+//! ```
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! v.iter().for_each(|x| println!("{}", x));
+//! // or
+//! for x in &v {
+//! println!("{}", x);
+//! }
+//! ```
+//!
+//! [`map`]: Iterator::map
+//! [`for_each`]: Iterator::for_each
+//!
+//! Another common way to evaluate an iterator is to use the [`collect`]
+//! method to produce a new collection.
+//!
+//! [`collect`]: Iterator::collect
+//!
+//! # Infinity
+//!
+//! Iterators do not have to be finite. As an example, an open-ended range is
+//! an infinite iterator:
+//!
+//! ```
+//! let numbers = 0..;
+//! ```
+//!
+//! It is common to use the [`take`] iterator adapter to turn an infinite
+//! iterator into a finite one:
+//!
+//! ```
+//! let numbers = 0..;
+//! let five_numbers = numbers.take(5);
+//!
+//! for number in five_numbers {
+//! println!("{}", number);
+//! }
+//! ```
+//!
+//! This will print the numbers `0` through `4`, each on their own line.
+//!
+//! Bear in mind that methods on infinite iterators, even those for which a
+//! result can be determined mathematically in finite time, may not terminate.
+//! Specifically, methods such as [`min`], which in the general case require
+//! traversing every element in the iterator, are likely not to return
+//! successfully for any infinite iterators.
+//!
+//! ```no_run
+//! let ones = std::iter::repeat(1);
+//! let least = ones.min().unwrap(); // Oh no! An infinite loop!
+//! // `ones.min()` causes an infinite loop, so we won't reach this point!
+//! println!("The smallest number one is {}.", least);
+//! ```
+//!
+//! [`take`]: Iterator::take
+//! [`min`]: Iterator::min
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::traits::Iterator;
+
+#[unstable(
+ feature = "step_trait",
+ reason = "likely to be replaced by finer-grained traits",
+ issue = "42168"
+)]
+pub use self::range::Step;
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+pub use self::sources::{empty, Empty};
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub use self::sources::{from_fn, FromFn};
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub use self::sources::{once, Once};
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub use self::sources::{once_with, OnceWith};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::sources::{repeat, Repeat};
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub use self::sources::{repeat_with, RepeatWith};
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub use self::sources::{successors, Successors};
+
+#[stable(feature = "fused", since = "1.26.0")]
+pub use self::traits::FusedIterator;
+#[unstable(feature = "trusted_len", issue = "37572")]
+pub use self::traits::TrustedLen;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::traits::{DoubleEndedIterator, Extend, FromIterator, IntoIterator};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::traits::{ExactSizeIterator, Product, Sum};
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub use self::traits::InPlaceIterable;
+
+#[stable(feature = "iter_cloned", since = "1.1.0")]
+pub use self::adapters::Cloned;
+#[stable(feature = "iter_copied", since = "1.36.0")]
+pub use self::adapters::Copied;
+#[stable(feature = "iterator_flatten", since = "1.29.0")]
+pub use self::adapters::Flatten;
+
+#[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
+pub use self::adapters::MapWhile;
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub use self::adapters::SourceIter;
+#[stable(feature = "iterator_step_by", since = "1.28.0")]
+pub use self::adapters::StepBy;
+#[unstable(feature = "trusted_random_access", issue = "none")]
+pub use self::adapters::TrustedRandomAccess;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::adapters::{Chain, Cycle, Enumerate, Filter, FilterMap, Map, Rev, Zip};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::adapters::{FlatMap, Peekable, Scan, Skip, SkipWhile, Take, TakeWhile};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::adapters::{Fuse, Inspect};
+
+pub(crate) use self::adapters::process_results;
+
+mod adapters;
+mod range;
+mod sources;
+mod traits;
--- /dev/null
+use crate::char;
+use crate::convert::TryFrom;
+use crate::mem;
+use crate::ops::{self, Add, Sub, Try};
+
+use super::{FusedIterator, TrustedLen};
+
+/// Objects that have a notion of *successor* and *predecessor* operations.
+///
+/// The *successor* operation moves towards values that compare greater.
+/// The *predecessor* operation moves towards values that compare lesser.
+///
+/// # Safety
+///
+/// This trait is `unsafe` because its implementation must be correct for
+/// the safety of `unsafe trait TrustedLen` implementations, and the results
+/// of using this trait can otherwise be trusted by `unsafe` code to be correct
+/// and fulfill the listed obligations.
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+pub unsafe trait Step: Clone + PartialOrd + Sized {
+ /// Returns the number of *successor* steps required to get from `start` to `end`.
+ ///
+ /// Returns `None` if the number of steps would overflow `usize`
+ /// (or is infinite, or if `end` would never be reached).
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `b`, and `n`:
+ ///
+ /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::forward_checked(&a, n) == Some(b)`
+ /// * `steps_between(&a, &b) == Some(n)` if and only if `Step::backward_checked(&b, n) == Some(a)`
+ /// * `steps_between(&a, &b) == Some(n)` only if `a <= b`
+ /// * Corollary: `steps_between(&a, &b) == Some(0)` if and only if `a == b`
+ /// * Note that `a <= b` does _not_ imply `steps_between(&a, &b) != None`;
+ /// this is the case when it would require more than `usize::MAX` steps to get to `b`
+ /// * `steps_between(&a, &b) == None` if `a > b`
+ fn steps_between(start: &Self, end: &Self) -> Option<usize>;
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`, returns `None`.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`:
+ ///
+ /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, m).and_then(|x| Step::forward_checked(x, n))`
+ ///
+ /// For any `a`, `n`, and `m` where `n + m` does not overflow:
+ ///
+ /// * `Step::forward_checked(a, n).and_then(|x| Step::forward_checked(x, m)) == Step::forward_checked(a, n + m)`
+ ///
+ /// For any `a` and `n`:
+ ///
+ /// * `Step::forward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::forward_checked(&x, 1))`
+ /// * Corollary: `Step::forward_checked(&a, 0) == Some(a)`
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+ fn forward_checked(start: Self, count: usize) -> Option<Self>;
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`,
+ /// this function is allowed to panic, wrap, or saturate.
+ /// The suggested behavior is to panic when debug assertions are enabled,
+ /// and to wrap or saturate otherwise.
+ ///
+ /// Unsafe code should not rely on the correctness of behavior after overflow.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`, where no overflow occurs:
+ ///
+ /// * `Step::forward(Step::forward(a, n), m) == Step::forward(a, n + m)`
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::forward_checked(a, n) == Some(Step::forward(a, n))`
+ /// * `Step::forward(a, n) == (0..n).fold(a, |x, _| Step::forward(x, 1))`
+ /// * Corollary: `Step::forward(a, 0) == a`
+ /// * `Step::forward(a, n) >= a`
+ /// * `Step::backward(Step::forward(a, n), n) == a`
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+ fn forward(start: Self, count: usize) -> Self {
+ Step::forward_checked(start, count).expect("overflow in `Step::forward`")
+ }
+
+ /// Returns the value that would be obtained by taking the *successor*
+ /// of `self` `count` times.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior for this operation to overflow the
+ /// range of values supported by `Self`. If you cannot guarantee that this
+ /// will not overflow, use `forward` or `forward_checked` instead.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`:
+ ///
+ /// * if there exists `b` such that `b > a`, it is safe to call `Step::forward_unchecked(a, 1)`
+ /// * if there exists `b`, `n` such that `steps_between(&a, &b) == Some(n)`,
+ /// it is safe to call `Step::forward_unchecked(a, m)` for any `m <= n`.
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
+ #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+ unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+ Step::forward(start, count)
+ }
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`, returns `None`.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`:
+ ///
+ /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == n.checked_add(m).and_then(|x| Step::backward_checked(a, x))`
+ /// * `Step::backward_checked(a, n).and_then(|x| Step::backward_checked(x, m)) == try { Step::backward_checked(a, n.checked_add(m)?) }`
+ ///
+ /// For any `a` and `n`:
+ ///
+ /// * `Step::backward_checked(a, n) == (0..n).try_fold(a, |x, _| Step::backward_checked(&x, 1))`
+ /// * Corollary: `Step::backward_checked(&a, 0) == Some(a)`
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+ fn backward_checked(start: Self, count: usize) -> Option<Self>;
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// If this would overflow the range of values supported by `Self`,
+ /// this function is allowed to panic, wrap, or saturate.
+ /// The suggested behavior is to panic when debug assertions are enabled,
+ /// and to wrap or saturate otherwise.
+ ///
+ /// Unsafe code should not rely on the correctness of behavior after overflow.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`, `n`, and `m`, where no overflow occurs:
+ ///
+ /// * `Step::backward(Step::backward(a, n), m) == Step::backward(a, n + m)`
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::backward_checked(a, n) == Some(Step::backward(a, n))`
+ /// * `Step::backward(a, n) == (0..n).fold(a, |x, _| Step::backward(x, 1))`
+ /// * Corollary: `Step::backward(a, 0) == a`
+ /// * `Step::backward(a, n) <= a`
+ /// * `Step::forward(Step::backward(a, n), n) == a`
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
+ fn backward(start: Self, count: usize) -> Self {
+ Step::backward_checked(start, count).expect("overflow in `Step::backward`")
+ }
+
+ /// Returns the value that would be obtained by taking the *predecessor*
+ /// of `self` `count` times.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior for this operation to overflow the
+ /// range of values supported by `Self`. If you cannot guarantee that this
+ /// will not overflow, use `backward` or `backward_checked` instead.
+ ///
+ /// # Invariants
+ ///
+ /// For any `a`:
+ ///
+ /// * if there exists `b` such that `b < a`, it is safe to call `Step::backward_unchecked(a, 1)`
+ /// * if there exists `b`, `n` such that `steps_between(&b, &a) == Some(n)`,
+ /// it is safe to call `Step::backward_unchecked(a, m)` for any `m <= n`.
+ ///
+ /// For any `a` and `n`, where no overflow occurs:
+ ///
+ /// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
+ #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+ unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+ Step::backward(start, count)
+ }
+}
+
+// These are still macro-generated because the integer literals resolve to different types.
+macro_rules! step_identical_methods {
+ () => {
+ #[inline]
+ unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
+ // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
+ unsafe { start.unchecked_add(n as Self) }
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
+ // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
+ unsafe { start.unchecked_sub(n as Self) }
+ }
+
+ #[inline]
+ fn forward(start: Self, n: usize) -> Self {
+ // In debug builds, trigger a panic on overflow.
+ // This should optimize completely out in release builds.
+ if Self::forward_checked(start, n).is_none() {
+ // `MAX + 1` panics under debug assertions; in release the
+ // wrapped result is simply discarded.
+ let _ = Add::add(Self::MAX, 1);
+ }
+ // Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
+ // (`n as Self` may truncate here, but that only happens when the
+ // checked call above already failed, and the result on overflow is
+ // unspecified anyway — see the `Step::forward` docs.)
+ start.wrapping_add(n as Self)
+ }
+
+ #[inline]
+ fn backward(start: Self, n: usize) -> Self {
+ // In debug builds, trigger a panic on overflow.
+ // This should optimize completely out in release builds.
+ if Self::backward_checked(start, n).is_none() {
+ // `MIN - 1` panics under debug assertions; in release the
+ // wrapped result is simply discarded.
+ let _ = Sub::sub(Self::MIN, 1);
+ }
+ // Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
+ start.wrapping_sub(n as Self)
+ }
+ };
+}
+
+macro_rules! step_integer_impls {
+ {
+ narrower than or same width as usize:
+ $( [ $u_narrower:ident $i_narrower:ident ] ),+;
+ wider than usize:
+ $( [ $u_wider:ident $i_wider:ident ] ),+;
+ } => {
+ $(
+ // NOTE(review): `allow(unreachable_patterns)` presumably silences the
+ // `Err` arm below when `Self::try_from` is infallible (i.e. when
+ // `Self` is `usize` itself) — confirm against the lint's behavior.
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ unsafe impl Step for $u_narrower {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ // This relies on $u_narrower <= usize
+ Some((*end - *start) as usize)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ match Self::try_from(n) {
+ Ok(n) => start.checked_add(n),
+ Err(_) => None, // if n is out of range, `unsigned_start + n` is too
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ match Self::try_from(n) {
+ Ok(n) => start.checked_sub(n),
+ Err(_) => None, // if n is out of range, `unsigned_start - n` is too
+ }
+ }
+ }
+
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ unsafe impl Step for $i_narrower {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ // This relies on $i_narrower <= usize
+ //
+ // Casting to isize extends the width but preserves the sign.
+ // Use wrapping_sub in isize space and cast to usize to compute
+ // the difference that may not fit inside the range of isize.
+ Some((*end as isize).wrapping_sub(*start as isize) as usize)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ match $u_narrower::try_from(n) {
+ Ok(n) => {
+ // Wrapping handles cases like
+ // `Step::forward(-120_i8, 200) == Some(80_i8)`,
+ // even though 200 is out of range for i8.
+ let wrapped = start.wrapping_add(n as Self);
+ if wrapped >= start {
+ Some(wrapped)
+ } else {
+ None // Addition overflowed
+ }
+ }
+ // If n is out of range of e.g. u8,
+ // then it is bigger than the entire range for i8 is wide
+ // so `any_i8 + n` necessarily overflows i8.
+ Err(_) => None,
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ match $u_narrower::try_from(n) {
+ Ok(n) => {
+ // Wrapping handles cases like
+ // `Step::backward(127_i8, 200) == Some(-73_i8)`,
+ // even though 200 is out of range for i8.
+ let wrapped = start.wrapping_sub(n as Self);
+ if wrapped <= start {
+ Some(wrapped)
+ } else {
+ None // Subtraction overflowed
+ }
+ }
+ // If n is out of range of e.g. u8,
+ // then it is bigger than the entire range for i8 is wide
+ // so `any_i8 - n` necessarily overflows i8.
+ Err(_) => None,
+ }
+ }
+ }
+ )+
+
+ $(
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ unsafe impl Step for $u_wider {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ // The difference may not fit in usize; `try_from` rejects that case.
+ usize::try_from(*end - *start).ok()
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ // `n as Self` is lossless: Self is wider than usize.
+ start.checked_add(n as Self)
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_sub(n as Self)
+ }
+ }
+
+ #[allow(unreachable_patterns)]
+ #[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+ unsafe impl Step for $i_wider {
+ step_identical_methods!();
+
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ if *start <= *end {
+ match end.checked_sub(*start) {
+ Some(result) => usize::try_from(result).ok(),
+ // If the difference is too big for e.g. i128,
+ // it's also gonna be too big for usize with fewer bits.
+ None => None,
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_add(n as Self)
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, n: usize) -> Option<Self> {
+ start.checked_sub(n as Self)
+ }
+ }
+ )+
+ };
+}
+
+// Select which integer types count as "narrower than or same width as usize"
+// based on the target's pointer width, so the lossless `as usize` casts in
+// the narrower `steps_between` implementations stay correct per target.
+#[cfg(target_pointer_width = "64")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [u64 i64], [usize isize];
+ wider than usize: [u128 i128];
+}
+
+#[cfg(target_pointer_width = "32")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [u32 i32], [usize isize];
+ wider than usize: [u64 i64], [u128 i128];
+}
+
+#[cfg(target_pointer_width = "16")]
+step_integer_impls! {
+ narrower than or same width as usize: [u8 i8], [u16 i16], [usize isize];
+ wider than usize: [u32 i32], [u64 i64], [u128 i128];
+}
+
+// `char` steps are computed on the underlying `u32` scalar value, adjusting
+// by 0x800 whenever a step crosses the surrogate range 0xD800..0xE000, which
+// contains no valid `char`s (see the SAFETY comments below).
+#[unstable(feature = "step_trait", reason = "recently redesigned", issue = "42168")]
+unsafe impl Step for char {
+ #[inline]
+ fn steps_between(&start: &char, &end: &char) -> Option<usize> {
+ let start = start as u32;
+ let end = end as u32;
+ if start <= end {
+ let count = end - start;
+ if start < 0xD800 && 0xE000 <= end {
+ // The interval spans the surrogate gap; those 0x800 values are not steps.
+ usize::try_from(count - 0x800).ok()
+ } else {
+ usize::try_from(count).ok()
+ }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn forward_checked(start: char, count: usize) -> Option<char> {
+ let start = start as u32;
+ let mut res = Step::forward_checked(start, count)?;
+ if start < 0xD800 && 0xD800 <= res {
+ // Landed in or beyond the surrogate range: skip over it.
+ res = Step::forward_checked(res, 0x800)?;
+ }
+ if res <= char::MAX as u32 {
+ // SAFETY: res is a valid unicode scalar
+ // (below 0x110000 and not in 0xD800..0xE000)
+ Some(unsafe { char::from_u32_unchecked(res) })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn backward_checked(start: char, count: usize) -> Option<char> {
+ let start = start as u32;
+ let mut res = Step::backward_checked(start, count)?;
+ if start >= 0xE000 && 0xE000 > res {
+ // Landed in or before the surrogate range: skip back over it.
+ res = Step::backward_checked(res, 0x800)?;
+ }
+ // SAFETY: res is a valid unicode scalar
+ // (below 0x110000 and not in 0xD800..0xE000)
+ Some(unsafe { char::from_u32_unchecked(res) })
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: char, count: usize) -> char {
+ let start = start as u32;
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ let mut res = unsafe { Step::forward_unchecked(start, count) };
+ if start < 0xD800 && 0xD800 <= res {
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ res = unsafe { Step::forward_unchecked(res, 0x800) };
+ }
+ // SAFETY: because of the previous contract, this is guaranteed
+ // by the caller to be a valid char.
+ unsafe { char::from_u32_unchecked(res) }
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: char, count: usize) -> char {
+ let start = start as u32;
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ let mut res = unsafe { Step::backward_unchecked(start, count) };
+ if start >= 0xE000 && 0xE000 > res {
+ // SAFETY: the caller must guarantee that this doesn't overflow
+ // the range of values for a char.
+ res = unsafe { Step::backward_unchecked(res, 0x800) };
+ }
+ // SAFETY: because of the previous contract, this is guaranteed
+ // by the caller to be a valid char.
+ unsafe { char::from_u32_unchecked(res) }
+ }
+}
+
+// Marker impls of `ExactSizeIterator` for `Range<$t>`. Intended only for
+// types whose range length always fits in `usize` — but see the notes at the
+// invocation site below for two historical exceptions.
+macro_rules! range_exact_iter_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl ExactSizeIterator for ops::Range<$t> { }
+ )*)
+}
+
+// Marker impls of `ExactSizeIterator` for `RangeInclusive<$t>`. Intended only
+// for types *strictly narrower* than `usize` — see the notes at the
+// invocation site below for two historical exceptions.
+macro_rules! range_incl_exact_iter_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "inclusive_range", since = "1.26.0")]
+ impl ExactSizeIterator for ops::RangeInclusive<$t> { }
+ )*)
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> Iterator for ops::Range<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ // Yield the old `start` and advance to its successor.
+ Some(mem::replace(&mut self.start, n))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.start < self.end {
+ let hint = Step::steps_between(&self.start, &self.end);
+ // If the length overflows `usize`, report a saturated lower bound
+ // and no upper bound.
+ (hint.unwrap_or(usize::MAX), hint)
+ } else {
+ (0, Some(0))
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ if plus_n < self.end {
+ // SAFETY: just checked precondition
+ self.start = unsafe { Step::forward_unchecked(plus_n.clone(), 1) };
+ return Some(plus_n);
+ }
+ }
+
+ // Skipped past the end: exhaust the range.
+ self.start = self.end.clone();
+ None
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn min(mut self) -> Option<A> {
+ // Ranges yield in increasing order, so the first element is the minimum.
+ self.next()
+ }
+
+ #[inline]
+ fn max(mut self) -> Option<A> {
+ // Ranges yield in increasing order, so the last element is the maximum.
+ self.next_back()
+ }
+}
+
+// These macros generate `ExactSizeIterator` impls for various range types.
+//
+// * `ExactSizeIterator::len` is required to always return an exact `usize`,
+// so no range can be longer than `usize::MAX`.
+// * For integer types in `Range<_>` this is the case for types narrower than or as wide as `usize`.
+// For integer types in `RangeInclusive<_>`
+// this is the case for types *strictly narrower* than `usize`
+// since e.g. `(0..=u64::MAX).len()` would be `u64::MAX + 1`.
+range_exact_iter_impl! {
+ usize u8 u16
+ isize i8 i16
+
+ // These are incorrect per the reasoning above,
+ // but removing them would be a breaking change as they were stabilized in Rust 1.0.0.
+ // So e.g. `(0..66_000_u32).len()` will compile without error or warnings
+ // on 16-bit platforms, but continue to give a wrong result.
+ u32
+ i32
+}
+range_incl_exact_iter_impl! {
+ u8
+ i8
+
+ // These are incorrect per the reasoning above,
+ // but removing them would be a breaking change as they were stabilized in Rust 1.26.0.
+ // So e.g. `(0..=u16::MAX).len()` will compile without error or warnings
+ // on 16-bit platforms, but continue to give a wrong result.
+ u16
+ i16
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> DoubleEndedIterator for ops::Range<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ if self.start < self.end {
+ // SAFETY: just checked precondition
+ self.end = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ // `end` is exclusive, so the element yielded is the decremented bound.
+ Some(self.end.clone())
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ if minus_n > self.start {
+ // SAFETY: just checked precondition
+ self.end = unsafe { Step::backward_unchecked(minus_n, 1) };
+ return Some(self.end.clone());
+ }
+ }
+
+ // Skipped back past the start: exhaust the range.
+ self.end = self.start.clone();
+ None
+ }
+}
+
+// SAFETY: per the `Step` safety contract, `steps_between` — and therefore the
+// `size_hint` computed from it — can be trusted by unsafe code.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Step> TrustedLen for ops::Range<A> {}
+
+// Once `start >= end`, `next` keeps returning `None`.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::Range<A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Step> Iterator for ops::RangeFrom<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ // Overflow behavior is that of `Step::forward` (for the primitive
+ // integer impls above: panic in debug builds, wrap in release).
+ let n = Step::forward(self.start.clone(), 1);
+ Some(mem::replace(&mut self.start, n))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // `RangeFrom` is unbounded: saturated lower bound, no upper bound.
+ (usize::MAX, None)
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ let plus_n = Step::forward(self.start.clone(), n);
+ self.start = Step::forward(plus_n.clone(), 1);
+ Some(plus_n)
+ }
+}
+
+// `next` never returns `None`, so fusing is trivially correct.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::RangeFrom<A> {}
+
+// SAFETY: `(usize::MAX, None)` is a trustworthy size hint for an unbounded iterator.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Step> TrustedLen for ops::RangeFrom<A> {}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<A: Step> Iterator for ops::RangeInclusive<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ mem::replace(&mut self.start, n)
+ } else {
+ // `start == end`: yield the final element and mark the range done,
+ // since `start` cannot be advanced past the inclusive `end`.
+ self.exhausted = true;
+ self.start.clone()
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_empty() {
+ return (0, Some(0));
+ }
+
+ match Step::steps_between(&self.start, &self.end) {
+ // +1 because the bound is inclusive; upper bound is `None` if that overflows.
+ Some(hint) => (hint.saturating_add(1), hint.checked_add(1)),
+ None => (usize::MAX, None),
+ }
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+
+ if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
+ use crate::cmp::Ordering::*;
+
+ match plus_n.partial_cmp(&self.end) {
+ Some(Less) => {
+ self.start = Step::forward(plus_n.clone(), 1);
+ return Some(plus_n);
+ }
+ Some(Equal) => {
+ // Landed exactly on the inclusive end: yield it and finish.
+ self.start = plus_n.clone();
+ self.exhausted = true;
+ return Some(plus_n);
+ }
+ _ => {}
+ }
+ }
+
+ // Skipped past the end: exhaust the range.
+ self.start = self.end.clone();
+ self.exhausted = true;
+ None
+ }
+
+ #[inline]
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Ok = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::forward_unchecked(self.start.clone(), 1) };
+ let n = mem::replace(&mut self.start, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ // The loop above stops one short; fold in the inclusive end itself.
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+
+ #[inline]
+ fn fold<B, F>(mut self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ // Reuse `try_fold` with an infallible (`!`-error) adapter.
+ self.try_fold(init, ok(f)).unwrap()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<A> {
+ self.next_back()
+ }
+
+ #[inline]
+ fn min(mut self) -> Option<A> {
+ // Elements are yielded in increasing order.
+ self.next()
+ }
+
+ #[inline]
+ fn max(mut self) -> Option<A> {
+ // Elements are yielded in increasing order.
+ self.next_back()
+ }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<A: Step> DoubleEndedIterator for ops::RangeInclusive<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+ let is_iterating = self.start < self.end;
+ Some(if is_iterating {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ mem::replace(&mut self.end, n)
+ } else {
+ // `start == end`: yield the final element and mark the range done.
+ self.exhausted = true;
+ self.end.clone()
+ })
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<A> {
+ if self.is_empty() {
+ return None;
+ }
+
+ if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
+ use crate::cmp::Ordering::*;
+
+ match minus_n.partial_cmp(&self.start) {
+ Some(Greater) => {
+ self.end = Step::backward(minus_n.clone(), 1);
+ return Some(minus_n);
+ }
+ Some(Equal) => {
+ // Landed exactly on the inclusive start: yield it and finish.
+ self.end = minus_n.clone();
+ self.exhausted = true;
+ return Some(minus_n);
+ }
+ _ => {}
+ }
+ }
+
+ // Skipped back past the start: exhaust the range.
+ self.end = self.start.clone();
+ self.exhausted = true;
+ None
+ }
+
+ #[inline]
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Ok = B>,
+ {
+ if self.is_empty() {
+ return try { init };
+ }
+
+ let mut accum = init;
+
+ while self.start < self.end {
+ // SAFETY: just checked precondition
+ let n = unsafe { Step::backward_unchecked(self.end.clone(), 1) };
+ let n = mem::replace(&mut self.end, n);
+ accum = f(accum, n)?;
+ }
+
+ self.exhausted = true;
+
+ // The loop above stops one short; fold in the inclusive start itself.
+ if self.start == self.end {
+ accum = f(accum, self.start.clone())?;
+ }
+
+ try { accum }
+ }
+
+ #[inline]
+ fn rfold<B, F>(mut self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ #[inline]
+ fn ok<B, T>(mut f: impl FnMut(B, T) -> B) -> impl FnMut(B, T) -> Result<B, !> {
+ move |acc, x| Ok(f(acc, x))
+ }
+
+ // Reuse `try_rfold` with an infallible (`!`-error) adapter.
+ self.try_rfold(init, ok(f)).unwrap()
+ }
+}
+
+// SAFETY: per the `Step` safety contract, the size hint computed from
+// `steps_between` can be trusted by unsafe code.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Step> TrustedLen for ops::RangeInclusive<A> {}
+
+// Once `is_empty()` holds, `next`/`next_back` keep returning `None`.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Step> FusedIterator for ops::RangeInclusive<A> {}
--- /dev/null
+use crate::fmt;
+use crate::marker;
+
+use super::{FusedIterator, TrustedLen};
+
+/// An iterator that repeats an element endlessly.
+///
+/// This `struct` is created by the [`repeat()`] function. See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Repeat<A> {
+ // The value yielded (via `clone`) by every call to `next`/`next_back`.
+ element: A,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Clone> Iterator for Repeat<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ // Never `None`: the iterator is infinite.
+ Some(self.element.clone())
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Infinite iterator: saturated lower bound, no upper bound.
+ (usize::MAX, None)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A: Clone> DoubleEndedIterator for Repeat<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ // Symmetric with `next`: the same element repeats from either end.
+ Some(self.element.clone())
+ }
+}
+
+// `next` never returns `None`, so fusing is trivially correct.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A: Clone> FusedIterator for Repeat<A> {}
+
+// SAFETY: `(usize::MAX, None)` is a trustworthy size hint for an iterator
+// that never terminates.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A: Clone> TrustedLen for Repeat<A> {}
+
+/// Creates a new iterator that endlessly repeats a single element.
+///
+/// The `repeat()` function repeats a single value over and over again.
+///
+/// Infinite iterators like `repeat()` are often used with adapters like
+/// [`Iterator::take()`], in order to make them finite.
+///
+/// If the element type of the iterator you need does not implement `Clone`,
+/// or if you do not want to keep the repeated element in memory, you can
+/// instead use the [`repeat_with()`] function.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // the number four 4ever:
+/// let mut fours = iter::repeat(4);
+///
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+/// assert_eq!(Some(4), fours.next());
+///
+/// // yup, still four
+/// assert_eq!(Some(4), fours.next());
+/// ```
+///
+/// Going finite with [`Iterator::take()`]:
+///
+/// ```
+/// use std::iter;
+///
+/// // that last example was too many fours. Let's only have four fours.
+/// let mut four_fours = iter::repeat(4).take(4);
+///
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+/// assert_eq!(Some(4), four_fours.next());
+///
+/// // ... and now we're done
+/// assert_eq!(None, four_fours.next());
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn repeat<T: Clone>(elt: T) -> Repeat<T> {
+    // Just stores the element; clones are made lazily on each `next` call.
+    Repeat { element: elt }
+}
+
+/// An iterator that repeats elements of type `A` endlessly by
+/// applying the provided closure `F: FnMut() -> A`.
+///
+/// This `struct` is created by the [`repeat_with()`] function.
+/// See its documentation for more.
+#[derive(Copy, Clone, Debug)]
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub struct RepeatWith<F> {
+    // The closure invoked to produce each yielded element.
+    repeater: F,
+}
+
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+impl<A, F: FnMut() -> A> Iterator for RepeatWith<F> {
+    type Item = A;
+
+    #[inline]
+    fn next(&mut self) -> Option<A> {
+        // Never exhausted: invoke the repeater closure for every element.
+        Some((self.repeater)())
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Infinite iterator: at least `usize::MAX` elements, no upper bound.
+        (usize::MAX, None)
+    }
+}
+
+// `RepeatWith::next` never returns `None`, so the iterator is trivially fused.
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+impl<A, F: FnMut() -> A> FusedIterator for RepeatWith<F> {}
+
+// SAFETY: `size_hint` reports `(usize::MAX, None)`, the required exact report
+// for an iterator that never ends.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A, F: FnMut() -> A> TrustedLen for RepeatWith<F> {}
+
+/// Creates a new iterator that repeats elements of type `A` endlessly by
+/// applying the provided closure, the repeater, `F: FnMut() -> A`.
+///
+/// The `repeat_with()` function calls the repeater over and over again.
+///
+/// Infinite iterators like `repeat_with()` are often used with adapters like
+/// [`Iterator::take()`], in order to make them finite.
+///
+/// If the element type of the iterator you need implements [`Clone`], and
+/// it is OK to keep the source element in memory, you should instead use
+/// the [`repeat()`] function.
+///
+/// An iterator produced by `repeat_with()` is not a [`DoubleEndedIterator`].
+/// If you need `repeat_with()` to return a [`DoubleEndedIterator`],
+/// please open a GitHub issue explaining your use case.
+///
+/// [`DoubleEndedIterator`]: crate::iter::DoubleEndedIterator
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // let's assume we have some value of a type that is not `Clone`
+/// // or which we don't want to have in memory just yet because it is expensive:
+/// #[derive(PartialEq, Debug)]
+/// struct Expensive;
+///
+/// // a particular value forever:
+/// let mut things = iter::repeat_with(|| Expensive);
+///
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// assert_eq!(Some(Expensive), things.next());
+/// ```
+///
+/// Using mutation and going finite:
+///
+/// ```rust
+/// use std::iter;
+///
+/// // From the zeroth to the third power of two:
+/// let mut curr = 1;
+/// let mut pow2 = iter::repeat_with(|| { let tmp = curr; curr *= 2; tmp })
+/// .take(4);
+///
+/// assert_eq!(Some(1), pow2.next());
+/// assert_eq!(Some(2), pow2.next());
+/// assert_eq!(Some(4), pow2.next());
+/// assert_eq!(Some(8), pow2.next());
+///
+/// // ... and now we're done
+/// assert_eq!(None, pow2.next());
+/// ```
+#[inline]
+#[stable(feature = "iterator_repeat_with", since = "1.28.0")]
+pub fn repeat_with<A, F: FnMut() -> A>(repeater: F) -> RepeatWith<F> {
+    // Just stores the closure; it is only invoked once iteration starts.
+    RepeatWith { repeater }
+}
+
+/// An iterator that yields nothing.
+///
+/// This `struct` is created by the [`empty()`] function. See its documentation for more.
+#[stable(feature = "iter_empty", since = "1.2.0")]
+// `PhantomData` ties the item type `T` to the iterator without storing a `T`.
+pub struct Empty<T>(marker::PhantomData<T>);
+
+// SAFETY: `Empty` stores no `T` (only `PhantomData`), so it can be sent and
+// shared across threads regardless of whether `T` itself can.
+#[stable(feature = "iter_empty_send_sync", since = "1.42.0")]
+unsafe impl<T> Send for Empty<T> {}
+#[stable(feature = "iter_empty_send_sync", since = "1.42.0")]
+unsafe impl<T> Sync for Empty<T> {}
+
+// Manual impl: `derive(Debug)` would needlessly require `T: Debug` (same
+// rationale as the manual `Clone`/`Default` impls below).
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T> fmt::Debug for Empty<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.pad("Empty")
+    }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> Iterator for Empty<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        // There is never anything to yield.
+        None
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Exactly zero elements, always.
+        (0, Some(0))
+    }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> DoubleEndedIterator for Empty<T> {
+    fn next_back(&mut self) -> Option<T> {
+        // The back is just as empty as the front.
+        None
+    }
+}
+
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> ExactSizeIterator for Empty<T> {
+    fn len(&self) -> usize {
+        // An empty iterator always has length zero.
+        0
+    }
+}
+
+// SAFETY: `size_hint` reports exactly `(0, Some(0))`, the true length.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Empty<T> {}
+
+// `Empty` keeps returning `None` forever, so it is trivially fused.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Empty<T> {}
+
+// not #[derive] because that adds a Clone bound on T,
+// which isn't necessary.
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> Clone for Empty<T> {
+    fn clone(&self) -> Empty<T> {
+        // Cloning is free: there is no data to duplicate.
+        Empty(marker::PhantomData)
+    }
+}
+
+// not #[derive] because that adds a Default bound on T,
+// which isn't necessary.
+#[stable(feature = "iter_empty", since = "1.2.0")]
+impl<T> Default for Empty<T> {
+    fn default() -> Empty<T> {
+        // `Empty` is a zero-sized type; this is its only possible value.
+        Empty(marker::PhantomData)
+    }
+}
+
+/// Creates an iterator that yields nothing.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // this could have been an iterator over i32, but alas, it's just not.
+/// let mut nope = iter::empty::<i32>();
+///
+/// assert_eq!(None, nope.next());
+/// ```
+#[stable(feature = "iter_empty", since = "1.2.0")]
+#[rustc_const_stable(feature = "const_iter_empty", since = "1.32.0")]
+pub const fn empty<T>() -> Empty<T> {
+    // `const fn`: an `Empty` can also be built in constant contexts.
+    Empty(marker::PhantomData)
+}
+
+/// An iterator that yields an element exactly once.
+///
+/// This `struct` is created by the [`once()`] function. See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub struct Once<T> {
+    // Delegates to `Option`'s by-value iterator: yields the value once, then
+    // is exhausted.
+    inner: crate::option::IntoIter<T>,
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> Iterator for Once<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        // Forward to the underlying `Option` iterator.
+        self.inner.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> DoubleEndedIterator for Once<T> {
+    fn next_back(&mut self) -> Option<T> {
+        // A single element looks the same from either end.
+        self.inner.next_back()
+    }
+}
+
+#[stable(feature = "iter_once", since = "1.2.0")]
+impl<T> ExactSizeIterator for Once<T> {
+    fn len(&self) -> usize {
+        // 1 before the element has been yielded, 0 afterwards.
+        self.inner.len()
+    }
+}
+
+// SAFETY: the delegated `size_hint` is exact — one element before it is
+// yielded, zero afterwards.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Once<T> {}
+
+// Keeps returning `None` after the single element has been yielded.
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Once<T> {}
+
+/// Creates an iterator that yields an element exactly once.
+///
+/// This is commonly used to adapt a single value into a [`chain()`] of other
+/// kinds of iteration. Maybe you have an iterator that covers almost
+/// everything, but you need an extra special case. Maybe you have a function
+/// which works on iterators, but you only need to process one value.
+///
+/// [`chain()`]: Iterator::chain
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // one is the loneliest number
+/// let mut one = iter::once(1);
+///
+/// assert_eq!(Some(1), one.next());
+///
+/// // just one, that's all we get
+/// assert_eq!(None, one.next());
+/// ```
+///
+/// Chaining together with another iterator. Let's say that we want to iterate
+/// over each file of the `.foo` directory, but also a configuration file,
+/// `.foorc`:
+///
+/// ```no_run
+/// use std::iter;
+/// use std::fs;
+/// use std::path::PathBuf;
+///
+/// let dirs = fs::read_dir(".foo").unwrap();
+///
+/// // we need to convert from an iterator of DirEntry-s to an iterator of
+/// // PathBufs, so we use map
+/// let dirs = dirs.map(|file| file.unwrap().path());
+///
+/// // now, our iterator just for our config file
+/// let config = iter::once(PathBuf::from(".foorc"));
+///
+/// // chain the two iterators together into one big iterator
+/// let files = dirs.chain(config);
+///
+/// // this will give us all of the files in .foo as well as .foorc
+/// for f in files {
+/// println!("{:?}", f);
+/// }
+/// ```
+#[stable(feature = "iter_once", since = "1.2.0")]
+pub fn once<T>(value: T) -> Once<T> {
+    // Wrap the value in `Some` and reuse `Option`'s by-value iterator.
+    Once { inner: Some(value).into_iter() }
+}
+
+/// An iterator that yields a single element of type `A` by
+/// applying the provided closure `F: FnOnce() -> A`.
+///
+/// This `struct` is created by the [`once_with()`] function.
+/// See its documentation for more.
+#[derive(Clone, Debug)]
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub struct OnceWith<F> {
+    // `Some(closure)` until invoked; taken out (leaving `None`) on first call.
+    gen: Option<F>,
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> Iterator for OnceWith<F> {
+    type Item = A;
+
+    #[inline]
+    fn next(&mut self) -> Option<A> {
+        // Move the closure out (leaving `None` behind) and invoke it: yields
+        // the value on the first call and `None` on every later call.
+        self.gen.take().map(|f| f())
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Exactly one element remains until the closure has been consumed.
+        let remaining = if self.gen.is_some() { 1 } else { 0 };
+        (remaining, Some(remaining))
+    }
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> DoubleEndedIterator for OnceWith<F> {
+    fn next_back(&mut self) -> Option<A> {
+        // A single (pending) element looks the same from either end.
+        self.next()
+    }
+}
+
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> ExactSizeIterator for OnceWith<F> {
+    fn len(&self) -> usize {
+        // 1 while the closure is still pending, 0 once it has been consumed.
+        self.gen.iter().len()
+    }
+}
+
+// After the closure has been taken, `gen` stays `None`, so `next` keeps
+// returning `None`.
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+impl<A, F: FnOnce() -> A> FusedIterator for OnceWith<F> {}
+
+// `TrustedLen` is itself unstable, so its impl carries the trait's unstable
+// attribute — consistent with the `Repeat`, `RepeatWith`, `Empty` and `Once`
+// impls in this module.
+// SAFETY: `size_hint` is exact: one element before the closure is consumed,
+// zero afterwards (it is derived from `Option::iter`).
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A, F: FnOnce() -> A> TrustedLen for OnceWith<F> {}
+
+/// Creates an iterator that lazily generates a value exactly once by invoking
+/// the provided closure.
+///
+/// This is commonly used to adapt a single value generator into a [`chain()`] of
+/// other kinds of iteration. Maybe you have an iterator that covers almost
+/// everything, but you need an extra special case. Maybe you have a function
+/// which works on iterators, but you only need to process one value.
+///
+/// Unlike [`once()`], this function will lazily generate the value on request.
+///
+/// [`chain()`]: Iterator::chain
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter;
+///
+/// // one is the loneliest number
+/// let mut one = iter::once_with(|| 1);
+///
+/// assert_eq!(Some(1), one.next());
+///
+/// // just one, that's all we get
+/// assert_eq!(None, one.next());
+/// ```
+///
+/// Chaining together with another iterator. Let's say that we want to iterate
+/// over each file of the `.foo` directory, but also a configuration file,
+/// `.foorc`:
+///
+/// ```no_run
+/// use std::iter;
+/// use std::fs;
+/// use std::path::PathBuf;
+///
+/// let dirs = fs::read_dir(".foo").unwrap();
+///
+/// // we need to convert from an iterator of DirEntry-s to an iterator of
+/// // PathBufs, so we use map
+/// let dirs = dirs.map(|file| file.unwrap().path());
+///
+/// // now, our iterator just for our config file
+/// let config = iter::once_with(|| PathBuf::from(".foorc"));
+///
+/// // chain the two iterators together into one big iterator
+/// let files = dirs.chain(config);
+///
+/// // this will give us all of the files in .foo as well as .foorc
+/// for f in files {
+/// println!("{:?}", f);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "iter_once_with", since = "1.43.0")]
+pub fn once_with<A, F: FnOnce() -> A>(gen: F) -> OnceWith<F> {
+    // Store the closure unevaluated; it runs on the first `next` call.
+    OnceWith { gen: Some(gen) }
+}
+
+/// Creates a new iterator where each iteration calls the provided closure
+/// `F: FnMut() -> Option<T>`.
+///
+/// This allows creating a custom iterator with any behavior
+/// without using the more verbose syntax of creating a dedicated type
+/// and implementing the [`Iterator`] trait for it.
+///
+/// Note that the `FromFn` iterator doesn’t make assumptions about the behavior of the closure,
+/// and therefore conservatively does not implement [`FusedIterator`],
+/// or override [`Iterator::size_hint()`] from its default `(0, None)`.
+///
+/// The closure can use captures and its environment to track state across iterations. Depending on
+/// how the iterator is used, this may require specifying the [`move`] keyword on the closure.
+///
+/// [`move`]: ../../std/keyword.move.html
+///
+/// # Examples
+///
+/// Let’s re-implement the counter iterator from the [module-level documentation]:
+///
+/// [module-level documentation]: super
+///
+/// ```
+/// let mut count = 0;
+/// let counter = std::iter::from_fn(move || {
+/// // Increment our count. This is why we started at zero.
+/// count += 1;
+///
+/// // Check to see if we've finished counting or not.
+/// if count < 6 {
+/// Some(count)
+/// } else {
+/// None
+/// }
+/// });
+/// assert_eq!(counter.collect::<Vec<_>>(), &[1, 2, 3, 4, 5]);
+/// ```
+#[inline]
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+pub fn from_fn<T, F>(f: F) -> FromFn<F>
+where
+    F: FnMut() -> Option<T>,
+{
+    // Wrap the closure; each `next` call simply invokes it.
+    FromFn(f)
+}
+
+/// An iterator where each iteration calls the provided closure `F: FnMut() -> Option<T>`.
+///
+/// This `struct` is created by the [`iter::from_fn()`] function.
+/// See its documentation for more.
+///
+/// [`iter::from_fn()`]: from_fn
+#[derive(Clone)]
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+// Newtype over the closure; the iteration protocol is whatever it returns.
+pub struct FromFn<F>(F);
+
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+impl<T, F> Iterator for FromFn<F>
+where
+    F: FnMut() -> Option<T>,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        // The closure's return value *is* the iteration result; no fusing or
+        // size information is assumed on its behalf.
+        (self.0)()
+    }
+}
+
+// Manual impl with no `F: Debug` bound; the closure field is omitted from the
+// output.
+#[stable(feature = "iter_from_fn", since = "1.34.0")]
+impl<F> fmt::Debug for FromFn<F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("FromFn").finish()
+    }
+}
+
+/// Creates a new iterator where each successive item is computed based on the preceding one.
+///
+/// The iterator starts with the given first item (if any)
+/// and calls the given `FnMut(&T) -> Option<T>` closure to compute each item’s successor.
+///
+/// ```
+/// use std::iter::successors;
+///
+/// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10));
+/// assert_eq!(powers_of_10.collect::<Vec<_>>(), &[1, 10, 100, 1_000, 10_000]);
+/// ```
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub fn successors<T, F>(first: Option<T>, succ: F) -> Successors<T, F>
+where
+    F: FnMut(&T) -> Option<T>,
+{
+    // If this function returned `impl Iterator<Item=T>`
+    // it could be based on `unfold` and not need a dedicated type.
+    // However having a named `Successors<T, F>` type allows it to be `Clone` when `T` and `F` are.
+    // `first` seeds the iteration; `succ` produces each following item.
+    Successors { next: first, succ }
+}
+
+/// A new iterator where each successive item is computed based on the preceding one.
+///
+/// This `struct` is created by the [`iter::successors()`] function.
+/// See its documentation for more.
+///
+/// [`iter::successors()`]: successors
+#[derive(Clone)]
+#[stable(feature = "iter_successors", since = "1.34.0")]
+pub struct Successors<T, F> {
+    // The element to yield next; `None` once the sequence has ended.
+    next: Option<T>,
+    // Closure computing each item's successor from a reference to it.
+    succ: F,
+}
+
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T, F> Iterator for Successors<T, F>
+where
+    F: FnMut(&T) -> Option<T>,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        // Yield the stored element and compute its successor for next time;
+        // once `next` is `None`, it stays `None`.
+        self.next.take().map(|item| {
+            self.next = (self.succ)(&item);
+            item
+        })
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // At least one item is pending while `next` is populated; the closure
+        // may keep producing indefinitely, so no upper bound is known.
+        match self.next {
+            Some(_) => (1, None),
+            None => (0, Some(0)),
+        }
+    }
+}
+
+// `next` is taken out and only repopulated while items are still produced, so
+// once iteration ends it stays ended.
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T, F> FusedIterator for Successors<T, F> where F: FnMut(&T) -> Option<T> {}
+
+// Manual impl with no `F: Debug` bound; the closure field is omitted.
+#[stable(feature = "iter_successors", since = "1.34.0")]
+impl<T: fmt::Debug, F> fmt::Debug for Successors<T, F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Successors").field("next", &self.next).finish()
+    }
+}
--- /dev/null
+use crate::iter;
+use crate::num::Wrapping;
+use crate::ops::{Add, Mul};
+
+/// Trait to represent types that can be created by summing up an iterator.
+///
+/// This trait is used to implement the [`sum()`] method on iterators. Types which
+/// implement the trait can be generated by the [`sum()`] method. Like
+/// [`FromIterator`] this trait should rarely be called directly and instead
+/// interacted with through [`Iterator::sum()`].
+///
+/// [`sum()`]: Sum::sum
+/// [`FromIterator`]: iter::FromIterator
+#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+// `A` defaults to `Self`; the non-default form is used by the by-reference
+// impls below (e.g. `Sum<&'a i32> for i32`).
+pub trait Sum<A = Self>: Sized {
+    /// Method which takes an iterator and generates `Self` from the elements by
+    /// "summing up" the items.
+    #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+    fn sum<I: Iterator<Item = A>>(iter: I) -> Self;
+}
+
+/// Trait to represent types that can be created by multiplying elements of an
+/// iterator.
+///
+/// This trait is used to implement the [`product()`] method on iterators. Types
+/// which implement the trait can be generated by the [`product()`] method. Like
+/// [`FromIterator`] this trait should rarely be called directly and instead
+/// interacted with through [`Iterator::product()`].
+///
+/// [`product()`]: Product::product
+/// [`FromIterator`]: iter::FromIterator
+#[stable(feature = "iter_arith_traits", since = "1.12.0")]
+// `A` defaults to `Self`; the non-default form is used by the by-reference
+// impls below (e.g. `Product<&'a i32> for i32`).
+pub trait Product<A = Self>: Sized {
+    /// Method which takes an iterator and generates `Self` from the elements by
+    /// multiplying the items.
+    #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+    fn product<I: Iterator<Item = A>>(iter: I) -> Self;
+}
+
+// N.B., explicitly use Add and Mul here to inherit overflow checks
+macro_rules! integer_sum_product {
+    // Inner rule: for each type `$a`, emit `Sum`/`Product` impls both by
+    // value and by reference, folding from the given zero/one identities and
+    // tagged with the given stability attribute.
+    (@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($(
+        #[$attr]
+        impl Sum for $a {
+            fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
+                iter.fold($zero, Add::add)
+            }
+        }
+
+        #[$attr]
+        impl Product for $a {
+            fn product<I: Iterator<Item=Self>>(iter: I) -> Self {
+                iter.fold($one, Mul::mul)
+            }
+        }
+
+        #[$attr]
+        impl<'a> Sum<&'a $a> for $a {
+            fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+                iter.fold($zero, Add::add)
+            }
+        }
+
+        #[$attr]
+        impl<'a> Product<&'a $a> for $a {
+            fn product<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+                iter.fold($one, Mul::mul)
+            }
+        }
+    )*);
+    // Entry rule: instantiate the impls for the plain integer types and for
+    // their `Wrapping` counterparts (wrap-around arithmetic).
+    ($($a:ty)*) => (
+        integer_sum_product!(@impls 0, 1,
+            #[stable(feature = "iter_arith_traits", since = "1.12.0")],
+            $($a)*);
+        integer_sum_product!(@impls Wrapping(0), Wrapping(1),
+            #[stable(feature = "wrapping_iter_arith", since = "1.14.0")],
+            $(Wrapping<$a>)*);
+    );
+}
+
+// For each float type, emit `Sum`/`Product` impls both by value and by
+// reference, folding from the `0.0`/`1.0` identities.
+macro_rules! float_sum_product {
+    ($($a:ident)*) => ($(
+        #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+        impl Sum for $a {
+            fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
+                iter.fold(0.0, Add::add)
+            }
+        }
+
+        #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+        impl Product for $a {
+            fn product<I: Iterator<Item=Self>>(iter: I) -> Self {
+                iter.fold(1.0, Mul::mul)
+            }
+        }
+
+        #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+        impl<'a> Sum<&'a $a> for $a {
+            fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+                iter.fold(0.0, Add::add)
+            }
+        }
+
+        #[stable(feature = "iter_arith_traits", since = "1.12.0")]
+        impl<'a> Product<&'a $a> for $a {
+            fn product<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
+                iter.fold(1.0, Mul::mul)
+            }
+        }
+    )*)
+}
+
+// Instantiate the `Sum`/`Product` impls for every primitive numeric type.
+integer_sum_product! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize }
+float_sum_product! { f32 f64 }
+
+#[stable(feature = "iter_arith_traits_result", since = "1.16.0")]
+impl<T, U, E> Sum<Result<U, E>> for Result<T, E>
+where
+    T: Sum<U>,
+{
+    /// Takes each element in the [`Iterator`]: if it is an [`Err`], no further
+    /// elements are taken, and the [`Err`] is returned. Should no [`Err`]
+    /// occur, the sum of all elements is returned.
+    ///
+    /// # Examples
+    ///
+    /// This sums up every integer in a vector, rejecting the sum if a negative
+    /// element is encountered:
+    ///
+    /// ```
+    /// let v = vec![1, 2];
+    /// let res: Result<i32, &'static str> = v.iter().map(|&x: &i32|
+    ///     if x < 0 { Err("Negative element found") }
+    ///     else { Ok(x) }
+    /// ).sum();
+    /// assert_eq!(res, Ok(3));
+    /// ```
+    fn sum<I>(iter: I) -> Result<T, E>
+    where
+        I: Iterator<Item = Result<U, E>>,
+    {
+        // `process_results` unwraps `Ok` values and short-circuits on `Err`.
+        iter::process_results(iter, |i| i.sum())
+    }
+}
+
+#[stable(feature = "iter_arith_traits_result", since = "1.16.0")]
+impl<T, U, E> Product<Result<U, E>> for Result<T, E>
+where
+    T: Product<U>,
+{
+    /// Takes each element in the [`Iterator`]: if it is an [`Err`], no further
+    /// elements are taken, and the [`Err`] is returned. Should no [`Err`]
+    /// occur, the product of all elements is returned.
+    fn product<I>(iter: I) -> Result<T, E>
+    where
+        I: Iterator<Item = Result<U, E>>,
+    {
+        // `process_results` unwraps `Ok` values and short-circuits on `Err`.
+        iter::process_results(iter, |i| i.product())
+    }
+}
+
+#[stable(feature = "iter_arith_traits_option", since = "1.37.0")]
+impl<T, U> Sum<Option<U>> for Option<T>
+where
+    T: Sum<U>,
+{
+    /// Takes each element in the [`Iterator`]: if it is a [`None`], no further
+    /// elements are taken, and the [`None`] is returned. Should no [`None`]
+    /// occur, the sum of all elements is returned.
+    ///
+    /// # Examples
+    ///
+    /// This sums up the position of the character 'a' in a vector of strings,
+    /// if a word did not have the character 'a' the operation returns `None`:
+    ///
+    /// ```
+    /// let words = vec!["have", "a", "great", "day"];
+    /// let total: Option<usize> = words.iter().map(|w| w.find('a')).sum();
+    /// assert_eq!(total, Some(5));
+    /// ```
+    fn sum<I>(iter: I) -> Option<T>
+    where
+        I: Iterator<Item = Option<U>>,
+    {
+        // Map `None` to a unit error so the short-circuiting `Result` impl
+        // above can be reused, then convert back with `.ok()`.
+        iter.map(|x| x.ok_or(())).sum::<Result<_, _>>().ok()
+    }
+}
+
+#[stable(feature = "iter_arith_traits_option", since = "1.37.0")]
+impl<T, U> Product<Option<U>> for Option<T>
+where
+    T: Product<U>,
+{
+    /// Takes each element in the [`Iterator`]: if it is a [`None`], no further
+    /// elements are taken, and the [`None`] is returned. Should no [`None`]
+    /// occur, the product of all elements is returned.
+    fn product<I>(iter: I) -> Option<T>
+    where
+        I: Iterator<Item = Option<U>>,
+    {
+        // Map `None` to a unit error so the short-circuiting `Result` impl
+        // above can be reused, then convert back with `.ok()`.
+        iter.map(|x| x.ok_or(())).product::<Result<_, _>>().ok()
+    }
+}
--- /dev/null
+/// Conversion from an [`Iterator`].
+///
+/// By implementing `FromIterator` for a type, you define how it will be
+/// created from an iterator. This is common for types which describe a
+/// collection of some kind.
+///
+/// [`FromIterator::from_iter()`] is rarely called explicitly, and is instead
+/// used through [`Iterator::collect()`] method. See [`Iterator::collect()`]'s
+/// documentation for more examples.
+///
+/// See also: [`IntoIterator`].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::iter::FromIterator;
+///
+/// let five_fives = std::iter::repeat(5).take(5);
+///
+/// let v = Vec::from_iter(five_fives);
+///
+/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+/// ```
+///
+/// Using [`Iterator::collect()`] to implicitly use `FromIterator`:
+///
+/// ```
+/// let five_fives = std::iter::repeat(5).take(5);
+///
+/// let v: Vec<i32> = five_fives.collect();
+///
+/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+/// ```
+///
+/// Implementing `FromIterator` for your type:
+///
+/// ```
+/// use std::iter::FromIterator;
+///
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // and we'll implement FromIterator
+/// impl FromIterator<i32> for MyCollection {
+/// fn from_iter<I: IntoIterator<Item=i32>>(iter: I) -> Self {
+/// let mut c = MyCollection::new();
+///
+/// for i in iter {
+/// c.add(i);
+/// }
+///
+/// c
+/// }
+/// }
+///
+/// // Now we can make a new iterator...
+/// let iter = (0..5).into_iter();
+///
+/// // ... and make a MyCollection out of it
+/// let c = MyCollection::from_iter(iter);
+///
+/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
+///
+/// // collect works too!
+///
+/// let iter = (0..5).into_iter();
+/// let c: MyCollection = iter.collect();
+///
+/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+// Tailors the compiler error shown when `collect` targets a type with no
+// matching `FromIterator` impl.
+#[rustc_on_unimplemented(
+    message = "a value of type `{Self}` cannot be built from an iterator \
+               over elements of type `{A}`",
+    label = "value of type `{Self}` cannot be built from `std::iter::Iterator<Item={A}>`"
+)]
+pub trait FromIterator<A>: Sized {
+    /// Creates a value from an iterator.
+    ///
+    /// See the [module-level documentation] for more.
+    ///
+    /// [module-level documentation]: crate::iter
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// use std::iter::FromIterator;
+    ///
+    /// let five_fives = std::iter::repeat(5).take(5);
+    ///
+    /// let v = Vec::from_iter(five_fives);
+    ///
+    /// assert_eq!(v, vec![5, 5, 5, 5, 5]);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self;
+}
+
+/// Conversion into an [`Iterator`].
+///
+/// By implementing `IntoIterator` for a type, you define how it will be
+/// converted to an iterator. This is common for types which describe a
+/// collection of some kind.
+///
+/// One benefit of implementing `IntoIterator` is that your type will [work
+/// with Rust's `for` loop syntax](crate::iter#for-loops-and-intoiterator).
+///
+/// See also: [`FromIterator`].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+/// let mut iter = v.into_iter();
+///
+/// assert_eq!(Some(1), iter.next());
+/// assert_eq!(Some(2), iter.next());
+/// assert_eq!(Some(3), iter.next());
+/// assert_eq!(None, iter.next());
+/// ```
+/// Implementing `IntoIterator` for your type:
+///
+/// ```
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // and we'll implement IntoIterator
+/// impl IntoIterator for MyCollection {
+/// type Item = i32;
+/// type IntoIter = std::vec::IntoIter<Self::Item>;
+///
+/// fn into_iter(self) -> Self::IntoIter {
+/// self.0.into_iter()
+/// }
+/// }
+///
+/// // Now we can make a new collection...
+/// let mut c = MyCollection::new();
+///
+/// // ... add some stuff to it ...
+/// c.add(0);
+/// c.add(1);
+/// c.add(2);
+///
+/// // ... and then turn it into an Iterator:
+/// for (i, n) in c.into_iter().enumerate() {
+/// assert_eq!(i as i32, n);
+/// }
+/// ```
+///
+/// It is common to use `IntoIterator` as a trait bound. This allows
+/// the input collection type to change, so long as it is still an
+/// iterator. Additional bounds can be specified by restricting on
+/// `Item`:
+///
+/// ```rust
+/// fn collect_as_strings<T>(collection: T) -> Vec<String>
+/// where
+/// T: IntoIterator,
+/// T::Item: std::fmt::Debug,
+/// {
+/// collection
+/// .into_iter()
+/// .map(|item| format!("{:?}", item))
+/// .collect()
+/// }
+/// ```
+#[rustc_diagnostic_item = "IntoIterator"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait IntoIterator {
+    /// The type of the elements being iterated over.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Item;
+
+    /// Which kind of iterator are we turning this into?
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type IntoIter: Iterator<Item = Self::Item>;
+
+    /// Creates an iterator from a value.
+    ///
+    /// See the [module-level documentation] for more.
+    ///
+    /// [module-level documentation]: crate::iter
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let v = vec![1, 2, 3];
+    /// let mut iter = v.into_iter();
+    ///
+    /// assert_eq!(Some(1), iter.next());
+    /// assert_eq!(Some(2), iter.next());
+    /// assert_eq!(Some(3), iter.next());
+    /// assert_eq!(None, iter.next());
+    /// ```
+    // Lang item: the compiler's `for`-loop desugaring calls this method.
+    #[lang = "into_iter"]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn into_iter(self) -> Self::IntoIter;
+}
+
+// Blanket impl: every `Iterator` is trivially `IntoIterator` (it converts
+// into itself), so `for` loops accept iterators directly.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator> IntoIterator for I {
+    type Item = I::Item;
+    type IntoIter = I;
+
+    fn into_iter(self) -> I {
+        self
+    }
+}
+
+/// Extend a collection with the contents of an iterator.
+///
+/// Iterators produce a series of values, and collections can also be thought
+/// of as a series of values. The `Extend` trait bridges this gap, allowing you
+/// to extend a collection by including the contents of that iterator. When
+/// extending a collection with an already existing key, that entry is updated
+/// or, in the case of collections that permit multiple entries with equal
+/// keys, that entry is inserted.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // You can extend a String with some chars:
+/// let mut message = String::from("The first three letters are: ");
+///
+/// message.extend(&['a', 'b', 'c']);
+///
+/// assert_eq!("abc", &message[29..32]);
+/// ```
+///
+/// Implementing `Extend`:
+///
+/// ```
+/// // A sample collection, that's just a wrapper over Vec<T>
+/// #[derive(Debug)]
+/// struct MyCollection(Vec<i32>);
+///
+/// // Let's give it some methods so we can create one and add things
+/// // to it.
+/// impl MyCollection {
+/// fn new() -> MyCollection {
+/// MyCollection(Vec::new())
+/// }
+///
+/// fn add(&mut self, elem: i32) {
+/// self.0.push(elem);
+/// }
+/// }
+///
+/// // since MyCollection has a list of i32s, we implement Extend for i32
+/// impl Extend<i32> for MyCollection {
+///
+/// // This is a bit simpler with the concrete type signature: we can call
+/// // extend on anything which can be turned into an Iterator which gives
+/// // us i32s. Because we need i32s to put into MyCollection.
+/// fn extend<T: IntoIterator<Item=i32>>(&mut self, iter: T) {
+///
+/// // The implementation is very straightforward: loop through the
+/// // iterator, and add() each element to ourselves.
+/// for elem in iter {
+/// self.add(elem);
+/// }
+/// }
+/// }
+///
+/// let mut c = MyCollection::new();
+///
+/// c.add(5);
+/// c.add(6);
+/// c.add(7);
+///
+/// // let's extend our collection with three more numbers
+/// c.extend(vec![1, 2, 3]);
+///
+/// // we've added these elements onto the end
+/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Extend<A> {
+    /// Extends a collection with the contents of an iterator.
+    ///
+    /// As this is the only required method for this trait, the [trait-level] docs
+    /// contain more details.
+    ///
+    /// [trait-level]: Extend
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // You can extend a String with some chars:
+    /// let mut message = String::from("abc");
+    ///
+    /// message.extend(['d', 'e', 'f'].iter());
+    ///
+    /// assert_eq!("abcdef", &message);
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T);
+
+    /// Extends a collection with exactly one element.
+    #[unstable(feature = "extend_one", issue = "72631")]
+    fn extend_one(&mut self, item: A) {
+        // Default: defer to `extend` with a one-element iterator.
+        self.extend(Some(item));
+    }
+
+    /// Reserves capacity in a collection for the given number of additional elements.
+    ///
+    /// The default implementation does nothing.
+    #[unstable(feature = "extend_one", issue = "72631")]
+    fn extend_reserve(&mut self, additional: usize) {
+        // The hint is deliberately ignored here; implementations with a
+        // capacity concept may override this.
+        let _ = additional;
+    }
+}
+
+#[stable(feature = "extend_for_unit", since = "1.28.0")]
+impl Extend<()> for () {
+    fn extend<T: IntoIterator<Item = ()>>(&mut self, iter: T) {
+        // Drive the iterator to completion for its side effects; the unit
+        // values themselves carry no information.
+        iter.into_iter().for_each(drop)
+    }
+    fn extend_one(&mut self, _item: ()) {}
+}
--- /dev/null
+use crate::ops::{ControlFlow, Try};
+
+/// An iterator able to yield elements from both ends.
+///
+/// Something that implements `DoubleEndedIterator` has one extra capability
+/// over something that implements [`Iterator`]: the ability to also take
+/// `Item`s from the back, as well as the front.
+///
+/// It is important to note that both back and forth work on the same range,
+/// and do not cross: iteration is over when they meet in the middle.
+///
+/// In a similar fashion to the [`Iterator`] protocol, once a
+/// `DoubleEndedIterator` returns [`None`] from a [`next_back()`], calling it
+/// again may or may not ever return [`Some`] again. [`next()`] and
+/// [`next_back()`] are interchangeable for this purpose.
+///
+/// [`next_back()`]: DoubleEndedIterator::next_back
+/// [`next()`]: Iterator::next
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let numbers = vec![1, 2, 3, 4, 5, 6];
+///
+/// let mut iter = numbers.iter();
+///
+/// assert_eq!(Some(&1), iter.next());
+/// assert_eq!(Some(&6), iter.next_back());
+/// assert_eq!(Some(&5), iter.next_back());
+/// assert_eq!(Some(&2), iter.next());
+/// assert_eq!(Some(&3), iter.next());
+/// assert_eq!(Some(&4), iter.next());
+/// assert_eq!(None, iter.next());
+/// assert_eq!(None, iter.next_back());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait DoubleEndedIterator: Iterator {
+ /// Removes and returns an element from the end of the iterator.
+ ///
+ /// Returns `None` when there are no more elements.
+ ///
+ /// The [trait-level] docs contain more details.
+ ///
+ /// [trait-level]: DoubleEndedIterator
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let numbers = vec![1, 2, 3, 4, 5, 6];
+ ///
+ /// let mut iter = numbers.iter();
+ ///
+ /// assert_eq!(Some(&1), iter.next());
+ /// assert_eq!(Some(&6), iter.next_back());
+ /// assert_eq!(Some(&5), iter.next_back());
+ /// assert_eq!(Some(&2), iter.next());
+ /// assert_eq!(Some(&3), iter.next());
+ /// assert_eq!(Some(&4), iter.next());
+ /// assert_eq!(None, iter.next());
+ /// assert_eq!(None, iter.next_back());
+ /// ```
+ ///
+ /// # Remarks
+ ///
+ /// The elements yielded by `DoubleEndedIterator`'s methods may differ from
+ /// the ones yielded by [`Iterator`]'s methods:
+ ///
+ /// ```
+ /// let vec = vec![(1, 'a'), (1, 'b'), (1, 'c'), (2, 'a'), (2, 'b')];
+ /// let uniq_by_fst_comp = || {
+ /// let mut seen = std::collections::HashSet::new();
+ /// vec.iter().copied().filter(move |x| seen.insert(x.0))
+ /// };
+ ///
+ /// assert_eq!(uniq_by_fst_comp().last(), Some((2, 'a')));
+ /// assert_eq!(uniq_by_fst_comp().next_back(), Some((2, 'b')));
+ ///
+ /// assert_eq!(
+ /// uniq_by_fst_comp().fold(vec![], |mut v, x| {v.push(x); v}),
+ /// vec![(1, 'a'), (2, 'a')]
+ /// );
+ /// assert_eq!(
+ /// uniq_by_fst_comp().rfold(vec![], |mut v, x| {v.push(x); v}),
+ /// vec![(2, 'b'), (1, 'c')]
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn next_back(&mut self) -> Option<Self::Item>;
+
+ /// Advances the iterator from the back by `n` elements.
+ ///
+ /// `advance_back_by` is the reverse version of [`advance_by`]. This method will
+ /// eagerly skip `n` elements starting from the back by calling [`next_back`] up
+ /// to `n` times until [`None`] is encountered.
+ ///
+ /// `advance_back_by(n)` will return [`Ok(())`] if the iterator successfully advances by
+ /// `n` elements, or [`Err(k)`] if [`None`] is encountered, where `k` is the number of
+ /// elements the iterator is advanced by before running out of elements (i.e. the length
+ /// of the iterator). Note that `k` is always less than `n`.
+ ///
+ /// Calling `advance_back_by(0)` does not consume any elements and always returns [`Ok(())`].
+ ///
+ /// [`advance_by`]: Iterator::advance_by
+ /// [`next_back`]: DoubleEndedIterator::next_back
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_advance_by)]
+ ///
+ /// let a = [3, 4, 5, 6];
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.advance_back_by(2), Ok(()));
+ /// assert_eq!(iter.next_back(), Some(&4));
+ /// assert_eq!(iter.advance_back_by(0), Ok(()));
+ /// assert_eq!(iter.advance_back_by(100), Err(1)); // only `&3` was skipped
+ /// ```
+ ///
+ /// [`Ok(())`]: Ok
+ /// [`Err(k)`]: Err
+ #[inline]
+ #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ // `ok_or(i)` turns a premature `None` into `Err(i)`, where `i` is the
+ // number of elements successfully consumed before running out; `?`
+ // propagates that error immediately.
+ for i in 0..n {
+ self.next_back().ok_or(i)?;
+ }
+ Ok(())
+ }
+
+ /// Returns the `n`th element from the end of the iterator.
+ ///
+ /// This is essentially the reversed version of [`Iterator::nth()`].
+ /// Although like most indexing operations, the count starts from zero, so
+ /// `nth_back(0)` returns the first value from the end, `nth_back(1)` the
+ /// second, and so on.
+ ///
+ /// Note that all elements between the end and the returned element will be
+ /// consumed, including the returned element. This also means that calling
+ /// `nth_back(0)` multiple times on the same iterator will return different
+ /// elements.
+ ///
+ /// `nth_back()` will return [`None`] if `n` is greater than or equal to the
+ /// length of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth_back(2), Some(&1));
+ /// ```
+ ///
+ /// Calling `nth_back()` multiple times doesn't rewind the iterator:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.nth_back(1), Some(&2));
+ /// assert_eq!(iter.nth_back(1), None);
+ /// ```
+ ///
+ /// Returning `None` if there are less than `n + 1` elements:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth_back(10), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_nth_back", since = "1.37.0")]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ // Skip `n` elements from the back; if the iterator runs out early,
+ // `advance_back_by` returns `Err` and `.ok()?` yields `None` here.
+ self.advance_back_by(n).ok()?;
+ self.next_back()
+ }
+
+ /// This is the reverse version of [`Iterator::try_fold()`]: it takes
+ /// elements starting from the back of the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = ["1", "2", "3"];
+ /// let sum = a.iter()
+ /// .map(|&s| s.parse::<i32>())
+ /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y)));
+ /// assert_eq!(sum, Ok(6));
+ /// ```
+ ///
+ /// Short-circuiting:
+ ///
+ /// ```
+ /// let a = ["1", "rust", "3"];
+ /// let mut it = a.iter();
+ /// let sum = it
+ /// .by_ref()
+ /// .map(|&s| s.parse::<i32>())
+ /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y)));
+ /// assert!(sum.is_err());
+ ///
+ /// // Because it short-circuited, the remaining elements are still
+ /// // available through the iterator.
+ /// assert_eq!(it.next_back(), Some(&"1"));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Ok = B>,
+ {
+ // `?` short-circuits on the failure variant of `R`, leaving any
+ // unconsumed elements in the iterator (see the doc example above).
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x)?;
+ }
+ // Wrap the final accumulator in the success variant of `R`.
+ try { accum }
+ }
+
+ /// An iterator method that reduces the iterator's elements to a single,
+ /// final value, starting from the back.
+ ///
+ /// This is the reverse version of [`Iterator::fold()`]: it takes elements
+ /// starting from the back of the iterator.
+ ///
+ /// `rfold()` takes two arguments: an initial value, and a closure with two
+ /// arguments: an 'accumulator', and an element. The closure returns the value that
+ /// the accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call.
+ ///
+ /// After applying this closure to every element of the iterator, `rfold()`
+ /// returns the accumulator.
+ ///
+ /// This operation is sometimes called 'reduce' or 'inject'.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the sum of all of the elements of a
+ /// let sum = a.iter()
+ /// .rfold(0, |acc, &x| acc + x);
+ ///
+ /// assert_eq!(sum, 6);
+ /// ```
+ ///
+ /// This example builds a string, starting with an initial value
+ /// and continuing with each element from the back until the front:
+ ///
+ /// ```
+ /// let numbers = [1, 2, 3, 4, 5];
+ ///
+ /// let zero = "0".to_string();
+ ///
+ /// let result = numbers.iter().rfold(zero, |acc, &x| {
+ /// format!("({} + {})", x, acc)
+ /// });
+ ///
+ /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))");
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_rfold", since = "1.27.0")]
+ fn rfold<B, F>(mut self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ // Infallible counterpart of `try_rfold`: consume from the back until
+ // the iterator is exhausted.
+ let mut accum = init;
+ while let Some(x) = self.next_back() {
+ accum = f(accum, x);
+ }
+ accum
+ }
+
+ /// Searches for an element of an iterator from the back that satisfies a predicate.
+ ///
+ /// `rfind()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, starting at the end, and if any
+ /// of them return `true`, then `rfind()` returns [`Some(element)`]. If they all return
+ /// `false`, it returns [`None`].
+ ///
+ /// `rfind()` is short-circuiting; in other words, it will stop processing
+ /// as soon as the closure returns `true`.
+ ///
+ /// Because `rfind()` takes a reference, and many iterators iterate over
+ /// references, this leads to a possibly confusing situation where the
+ /// argument is a double reference. You can see this effect in the
+ /// examples below, with `&&x`.
+ ///
+ /// [`Some(element)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2));
+ ///
+ /// assert_eq!(a.iter().rfind(|&&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.rfind(|&&x| x == 2), Some(&2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next_back(), Some(&1));
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_rfind", since = "1.27.0")]
+ fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ // Adapt the predicate into a `try_rfold` closure: `Break(x)` carries
+ // out the first (from the back) matching element, short-circuiting.
+ #[inline]
+ fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
+ move |(), x| {
+ if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE }
+ }
+ }
+
+ // `break_value()` is `Some(x)` iff the fold was interrupted by a match.
+ self.try_rfold((), check(predicate)).break_value()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I {
+ // Forward every method through the mutable reference so that `&mut I`
+ // keeps any specialized implementations provided by `I` itself.
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ (**self).advance_back_by(n)
+ }
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
--- /dev/null
+/// An iterator that knows its exact length.
+///
+/// Many [`Iterator`]s don't know how many times they will iterate, but some do.
+/// If an iterator knows how many times it can iterate, providing access to
+/// that information can be useful. For example, if you want to iterate
+/// backwards, a good start is to know where the end is.
+///
+/// When implementing an `ExactSizeIterator`, you must also implement
+/// [`Iterator`]. When doing so, the implementation of [`Iterator::size_hint`]
+/// *must* return the exact size of the iterator.
+///
+/// The [`len`] method has a default implementation, so you usually shouldn't
+/// implement it. However, you may be able to provide a more performant
+/// implementation than the default, so overriding it in this case makes sense.
+///
+/// [`len`]: ExactSizeIterator::len
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // a finite range knows exactly how many times it will iterate
+/// let five = 0..5;
+///
+/// assert_eq!(5, five.len());
+/// ```
+///
+/// In the [module-level docs], we implemented an [`Iterator`], `Counter`.
+/// Let's implement `ExactSizeIterator` for it as well:
+///
+/// [module-level docs]: crate::iter
+///
+/// ```
+/// # struct Counter {
+/// # count: usize,
+/// # }
+/// # impl Counter {
+/// # fn new() -> Counter {
+/// # Counter { count: 0 }
+/// # }
+/// # }
+/// # impl Iterator for Counter {
+/// # type Item = usize;
+/// # fn next(&mut self) -> Option<Self::Item> {
+/// # self.count += 1;
+/// # if self.count < 6 {
+/// # Some(self.count)
+/// # } else {
+/// # None
+/// # }
+/// # }
+/// # }
+/// impl ExactSizeIterator for Counter {
+/// // We can easily calculate the remaining number of iterations.
+/// fn len(&self) -> usize {
+/// 5 - self.count
+/// }
+/// }
+///
+/// // And now we can use it!
+///
+/// let counter = Counter::new();
+///
+/// assert_eq!(5, counter.len());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ExactSizeIterator: Iterator {
+ /// Returns the exact length of the iterator.
+ ///
+ /// The implementation ensures that the iterator will return [`Some(T)`]
+ /// exactly `len()` more times, before returning [`None`].
+ /// This method has a default implementation, so you usually should not
+ /// implement it directly. However, if you can provide a more efficient
+ /// implementation, you can do so. See the [trait-level] docs for an
+ /// example.
+ ///
+ /// This function has the same safety guarantees as the
+ /// [`Iterator::size_hint`] function.
+ ///
+ /// [trait-level]: ExactSizeIterator
+ /// [`Some(T)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // a finite range knows exactly how many times it will iterate
+ /// let five = 0..5;
+ ///
+ /// assert_eq!(5, five.len());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn len(&self) -> usize {
+ let (lower, upper) = self.size_hint();
+ // Note: This assertion is overly defensive, but it checks the invariant
+ // guaranteed by the trait. If this trait were rust-internal,
+ // we could use debug_assert!; assert_eq! will check all Rust user
+ // implementations too.
+ assert_eq!(upper, Some(lower));
+ lower
+ }
+
+ /// Returns `true` if the iterator is empty.
+ ///
+ /// This method has a default implementation using
+ /// [`ExactSizeIterator::len()`], so you don't need to implement it yourself.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(exact_size_is_empty)]
+ ///
+ /// let mut one_element = std::iter::once(0);
+ /// assert!(!one_element.is_empty());
+ ///
+ /// assert_eq!(one_element.next(), Some(0));
+ /// assert!(one_element.is_empty());
+ ///
+ /// assert_eq!(one_element.next(), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "exact_size_is_empty", issue = "35428")]
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized> ExactSizeIterator for &mut I {
+ // Forward both methods through the mutable reference so that `&mut I`
+ // keeps any more efficient implementations provided by `I` itself.
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
--- /dev/null
+// ignore-tidy-filelength
+// This file almost exclusively consists of the definition of `Iterator`. We
+// can't split that into multiple files.
+
+use crate::cmp::{self, Ordering};
+use crate::ops::{Add, ControlFlow, Try};
+
+use super::super::TrustedRandomAccess;
+use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
+use super::super::{FlatMap, Flatten};
+use super::super::{FromIterator, Product, Sum, Zip};
+use super::super::{
+ Inspect, Map, MapWhile, Peekable, Rev, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile,
+};
+
+// Compile-time check that `Iterator` remains object safe (usable as
+// `dyn Iterator`); this stops compiling if a change breaks object safety.
+fn _assert_is_object_safe(_: &dyn Iterator<Item = ()>) {}
+
+/// An interface for dealing with iterators.
+///
+/// This is the main iterator trait. For more about the concept of iterators
+/// generally, please see the [module-level documentation]. In particular, you
+/// may want to know how to [implement `Iterator`][impl].
+///
+/// [module-level documentation]: crate::iter
+/// [impl]: crate::iter#implementing-iterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ on(
+ _Self = "[std::ops::Range<Idx>; 1]",
+ label = "if you meant to iterate between two values, remove the square brackets",
+ note = "`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \
+ without the brackets: `start..end`"
+ ),
+ on(
+ _Self = "[std::ops::RangeFrom<Idx>; 1]",
+ label = "if you meant to iterate from a value onwards, remove the square brackets",
+ note = "`[start..]` is an array of one `RangeFrom`; you might have meant to have a \
+ `RangeFrom` without the brackets: `start..`, keeping in mind that iterating over an \
+ unbounded iterator will run forever unless you `break` or `return` from within the \
+ loop"
+ ),
+ on(
+ _Self = "[std::ops::RangeTo<Idx>; 1]",
+ label = "if you meant to iterate until a value, remove the square brackets and add a \
+ starting value",
+ note = "`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \
+ `Range` without the brackets: `0..end`"
+ ),
+ on(
+ _Self = "[std::ops::RangeInclusive<Idx>; 1]",
+ label = "if you meant to iterate between two values, remove the square brackets",
+ note = "`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \
+ `RangeInclusive` without the brackets: `start..=end`"
+ ),
+ on(
+ _Self = "[std::ops::RangeToInclusive<Idx>; 1]",
+ label = "if you meant to iterate until a value (including it), remove the square brackets \
+ and add a starting value",
+ note = "`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \
+ bounded `RangeInclusive` without the brackets: `0..=end`"
+ ),
+ on(
+ _Self = "std::ops::RangeTo<Idx>",
+ label = "if you meant to iterate until a value, add a starting value",
+ note = "`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \
+ bounded `Range`: `0..end`"
+ ),
+ on(
+ _Self = "std::ops::RangeToInclusive<Idx>",
+ label = "if you meant to iterate until a value (including it), add a starting value",
+ note = "`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \
+ to have a bounded `RangeInclusive`: `0..=end`"
+ ),
+ on(
+ _Self = "&str",
+ label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
+ ),
+ on(
+ _Self = "std::string::String",
+ label = "`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`"
+ ),
+ on(
+ _Self = "[]",
+ label = "borrow the array with `&` or call `.iter()` on it to iterate over it",
+ note = "arrays are not iterators, but slices like the following are: `&[1, 2, 3]`"
+ ),
+ on(
+ _Self = "{integral}",
+ note = "if you want to iterate between `start` until a value `end`, use the exclusive range \
+ syntax `start..end` or the inclusive range syntax `start..=end`"
+ ),
+ label = "`{Self}` is not an iterator",
+ message = "`{Self}` is not an iterator"
+)]
+#[doc(spotlight)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub trait Iterator {
+ /// The type of the elements being iterated over.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Item;
+
+ /// Advances the iterator and returns the next value.
+ ///
+ /// Returns [`None`] when iteration is finished. Individual iterator
+ /// implementations may choose to resume iteration, and so calling `next()`
+ /// again may or may not eventually start returning [`Some(Item)`] again at some
+ /// point.
+ ///
+ /// [`Some(Item)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// // A call to next() returns the next value...
+ /// assert_eq!(Some(&1), iter.next());
+ /// assert_eq!(Some(&2), iter.next());
+ /// assert_eq!(Some(&3), iter.next());
+ ///
+ /// // ... and then None once it's over.
+ /// assert_eq!(None, iter.next());
+ ///
+ /// // More calls may or may not return `None`. Here, they always will.
+ /// assert_eq!(None, iter.next());
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[lang = "next"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn next(&mut self) -> Option<Self::Item>;
+
+ /// Returns the bounds on the remaining length of the iterator.
+ ///
+ /// Specifically, `size_hint()` returns a tuple where the first element
+ /// is the lower bound, and the second element is the upper bound.
+ ///
+ /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`.
+ /// A [`None`] here means that either there is no known upper bound, or the
+ /// upper bound is larger than [`usize`].
+ ///
+ /// # Implementation notes
+ ///
+ /// It is not enforced that an iterator implementation yields the declared
+ /// number of elements. A buggy iterator may yield less than the lower bound
+ /// or more than the upper bound of elements.
+ ///
+ /// `size_hint()` is primarily intended to be used for optimizations such as
+ /// reserving space for the elements of the iterator, but must not be
+ /// trusted to e.g., omit bounds checks in unsafe code. An incorrect
+ /// implementation of `size_hint()` should not lead to memory safety
+ /// violations.
+ ///
+ /// That said, the implementation should provide a correct estimation,
+ /// because otherwise it would be a violation of the trait's protocol.
+ ///
+ /// The default implementation returns `(0, `[`None`]`)` which is correct for any
+ /// iterator.
+ ///
+ /// [`usize`]: type@usize
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let iter = a.iter();
+ ///
+ /// assert_eq!((3, Some(3)), iter.size_hint());
+ /// ```
+ ///
+ /// A more complex example:
+ ///
+ /// ```
+ /// // The even numbers from zero to ten.
+ /// let iter = (0..10).filter(|x| x % 2 == 0);
+ ///
+ /// // We might iterate from zero to ten times. Knowing that it's five
+ /// // exactly wouldn't be possible without executing filter().
+ /// assert_eq!((0, Some(10)), iter.size_hint());
+ ///
+ /// // Let's add five more numbers with chain()
+ /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20);
+ ///
+ /// // now both bounds are increased by five
+ /// assert_eq!((5, Some(15)), iter.size_hint());
+ /// ```
+ ///
+ /// Returning `None` for an upper bound:
+ ///
+ /// ```
+ /// // an infinite iterator has no upper bound
+ /// // and the maximum possible lower bound
+ /// let iter = 0..;
+ ///
+ /// assert_eq!((usize::MAX, None), iter.size_hint());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, None)
+ }
+
+ /// Consumes the iterator, counting the number of iterations and returning it.
+ ///
+ /// This method will call [`next`] repeatedly until [`None`] is encountered,
+ /// returning the number of times it saw [`Some`]. Note that [`next`] has to be
+ /// called at least once even if the iterator does not have any elements.
+ ///
+ /// [`next`]: Iterator::next
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so counting elements of
+ /// an iterator with more than [`usize::MAX`] elements either produces the
+ /// wrong result or panics. If debug assertions are enabled, a panic is
+ /// guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic if the iterator has more than [`usize::MAX`]
+ /// elements.
+ ///
+ /// [`usize::MAX`]: crate::usize::MAX
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().count(), 3);
+ ///
+ /// let a = [1, 2, 3, 4, 5];
+ /// assert_eq!(a.iter().count(), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn count(self) -> usize
+ where
+ Self: Sized,
+ {
+ #[inline]
+ fn add1<T>(count: usize, _: T) -> usize {
+ // Might overflow.
+ Add::add(count, 1)
+ }
+
+ self.fold(0, add1)
+ }
+
+ /// Consumes the iterator, returning the last element.
+ ///
+ /// This method will evaluate the iterator until it returns [`None`]. While
+ /// doing so, it keeps track of the current element. After [`None`] is
+ /// returned, `last()` will then return the last element it saw.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().last(), Some(&3));
+ ///
+ /// let a = [1, 2, 3, 4, 5];
+ /// assert_eq!(a.iter().last(), Some(&5));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn last(self) -> Option<Self::Item>
+ where
+ Self: Sized,
+ {
+ #[inline]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+
+ /// Advances the iterator by `n` elements.
+ ///
+ /// This method will eagerly skip `n` elements by calling [`next`] up to `n`
+ /// times until [`None`] is encountered.
+ ///
+ /// `advance_by(n)` will return [`Ok(())`][Ok] if the iterator successfully advances by
+ /// `n` elements, or [`Err(k)`][Err] if [`None`] is encountered, where `k` is the number
+ /// of elements the iterator is advanced by before running out of elements (i.e. the
+ /// length of the iterator). Note that `k` is always less than `n`.
+ ///
+ /// Calling `advance_by(0)` does not consume any elements and always returns [`Ok(())`][Ok].
+ ///
+ /// [`next`]: Iterator::next
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_advance_by)]
+ ///
+ /// let a = [1, 2, 3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.advance_by(2), Ok(()));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.advance_by(0), Ok(()));
+ /// assert_eq!(iter.advance_by(100), Err(1)); // only `&4` was skipped
+ /// ```
+ #[inline]
+ #[unstable(feature = "iter_advance_by", reason = "recently added", issue = "77404")]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ for i in 0..n {
+ self.next().ok_or(i)?;
+ }
+ Ok(())
+ }
+
+ /// Returns the `n`th element of the iterator.
+ ///
+ /// Like most indexing operations, the count starts from zero, so `nth(0)`
+ /// returns the first value, `nth(1)` the second, and so on.
+ ///
+ /// Note that all preceding elements, as well as the returned element, will be
+ /// consumed from the iterator. That means that the preceding elements will be
+ /// discarded, and also that calling `nth(0)` multiple times on the same iterator
+ /// will return different elements.
+ ///
+ /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the
+ /// iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth(1), Some(&2));
+ /// ```
+ ///
+ /// Calling `nth()` multiple times doesn't rewind the iterator:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.nth(1), Some(&2));
+ /// assert_eq!(iter.nth(1), None);
+ /// ```
+ ///
+ /// Returning `None` if there are less than `n + 1` elements:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// assert_eq!(a.iter().nth(10), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.advance_by(n).ok()?;
+ self.next()
+ }
+
+ /// Creates an iterator starting at the same point, but stepping by
+ /// the given amount at each iteration.
+ ///
+ /// Note 1: The first element of the iterator will always be returned,
+ /// regardless of the step given.
+ ///
+ /// Note 2: The time at which ignored elements are pulled is not fixed.
+ /// `StepBy` behaves like the sequence `next(), nth(step-1), nth(step-1), …`,
+ /// but is also free to behave like the sequence
+ /// `advance_n_and_return_first(step), advance_n_and_return_first(step), …`
+ /// Which way is used may change for some iterators for performance reasons.
+ /// The second way will advance the iterator earlier and may consume more items.
+ ///
+ /// `advance_n_and_return_first` is the equivalent of:
+ /// ```
+ /// fn advance_n_and_return_first<I>(iter: &mut I, total_step: usize) -> Option<I::Item>
+ /// where
+ /// I: Iterator,
+ /// {
+ /// let next = iter.next();
+ /// if total_step > 1 {
+ /// iter.nth(total_step-2);
+ /// }
+ /// next
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// The method will panic if the given step is `0`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [0, 1, 2, 3, 4, 5];
+ /// let mut iter = a.iter().step_by(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_step_by", since = "1.28.0")]
+ fn step_by(self, step: usize) -> StepBy<Self>
+ where
+ Self: Sized,
+ {
+ StepBy::new(self, step)
+ }
+
+ /// Takes two iterators and creates a new iterator over both in sequence.
+ ///
+ /// `chain()` will return a new iterator which will first iterate over
+ /// values from the first iterator and then over values from the second
+ /// iterator.
+ ///
+ /// In other words, it links two iterators together, in a chain. 🔗
+ ///
+ /// [`once`] is commonly used to adapt a single value into a chain of
+ /// other kinds of iteration.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a1 = [1, 2, 3];
+ /// let a2 = [4, 5, 6];
+ ///
+ /// let mut iter = a1.iter().chain(a2.iter());
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), Some(&5));
+ /// assert_eq!(iter.next(), Some(&6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Since the argument to `chain()` uses [`IntoIterator`], we can pass
+ /// anything that can be converted into an [`Iterator`], not just an
+ /// [`Iterator`] itself. For example, slices (`&[T]`) implement
+ /// [`IntoIterator`], and so can be passed to `chain()` directly:
+ ///
+ /// ```
+ /// let s1 = &[1, 2, 3];
+ /// let s2 = &[4, 5, 6];
+ ///
+ /// let mut iter = s1.iter().chain(s2);
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&4));
+ /// assert_eq!(iter.next(), Some(&5));
+ /// assert_eq!(iter.next(), Some(&6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If you work with Windows API, you may wish to convert [`OsStr`] to `Vec<u16>`:
+ ///
+ /// ```
+ /// #[cfg(windows)]
+ /// fn os_str_to_utf16(s: &std::ffi::OsStr) -> Vec<u16> {
+ /// use std::os::windows::ffi::OsStrExt;
+ /// s.encode_wide().chain(std::iter::once(0)).collect()
+ /// }
+ /// ```
+ ///
+ /// [`once`]: crate::iter::once
+ /// [`OsStr`]: ../../std/ffi/struct.OsStr.html
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter>
+ where
+ Self: Sized,
+ U: IntoIterator<Item = Self::Item>,
+ {
+ Chain::new(self, other.into_iter())
+ }
+
+ /// 'Zips up' two iterators into a single iterator of pairs.
+ ///
+ /// `zip()` returns a new iterator that will iterate over two other
+ /// iterators, returning a tuple where the first element comes from the
+ /// first iterator, and the second element comes from the second iterator.
+ ///
+ /// In other words, it zips two iterators together, into a single one.
+ ///
+ /// If either iterator returns [`None`], [`next`] from the zipped iterator
+ /// will return [`None`]. If the first iterator returns [`None`], `zip` will
+ /// short-circuit and `next` will not be called on the second iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a1 = [1, 2, 3];
+ /// let a2 = [4, 5, 6];
+ ///
+ /// let mut iter = a1.iter().zip(a2.iter());
+ ///
+ /// assert_eq!(iter.next(), Some((&1, &4)));
+ /// assert_eq!(iter.next(), Some((&2, &5)));
+ /// assert_eq!(iter.next(), Some((&3, &6)));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Since the argument to `zip()` uses [`IntoIterator`], we can pass
+ /// anything that can be converted into an [`Iterator`], not just an
+ /// [`Iterator`] itself. For example, slices (`&[T]`) implement
+ /// [`IntoIterator`], and so can be passed to `zip()` directly:
+ ///
+ /// ```
+ /// let s1 = &[1, 2, 3];
+ /// let s2 = &[4, 5, 6];
+ ///
+ /// let mut iter = s1.iter().zip(s2);
+ ///
+ /// assert_eq!(iter.next(), Some((&1, &4)));
+ /// assert_eq!(iter.next(), Some((&2, &5)));
+ /// assert_eq!(iter.next(), Some((&3, &6)));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// `zip()` is often used to zip an infinite iterator to a finite one.
+ /// This works because the finite iterator will eventually return [`None`],
+ /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate`]:
+ ///
+ /// ```
+ /// let enumerate: Vec<_> = "foo".chars().enumerate().collect();
+ ///
+ /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect();
+ ///
+ /// assert_eq!((0, 'f'), enumerate[0]);
+ /// assert_eq!((0, 'f'), zipper[0]);
+ ///
+ /// assert_eq!((1, 'o'), enumerate[1]);
+ /// assert_eq!((1, 'o'), zipper[1]);
+ ///
+ /// assert_eq!((2, 'o'), enumerate[2]);
+ /// assert_eq!((2, 'o'), zipper[2]);
+ /// ```
+ ///
+ /// [`enumerate`]: Iterator::enumerate
+ /// [`next`]: Iterator::next
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter>
+ where
+ Self: Sized,
+ U: IntoIterator,
+ {
+ // `other` may be any IntoIterator; convert it once and hand both
+ // iterators to the Zip adapter, which produces the pairs lazily.
+ Zip::new(self, other.into_iter())
+ }
+
+ /// Takes a closure and creates an iterator which calls that closure on each
+ /// element.
+ ///
+ /// `map()` transforms one iterator into another, by means of its argument:
+ /// something that implements [`FnMut`]. It produces a new iterator which
+ /// calls this closure on each element of the original iterator.
+ ///
+ /// If you are good at thinking in types, you can think of `map()` like this:
+ /// If you have an iterator that gives you elements of some type `A`, and
+ /// you want an iterator of some other type `B`, you can use `map()`,
+ /// passing a closure that takes an `A` and returns a `B`.
+ ///
+ /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is
+ /// lazy, it is best used when you're already working with other iterators.
+ /// If you're doing some sort of looping for a side effect, it's considered
+ /// more idiomatic to use [`for`] than `map()`.
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ /// [`FnMut`]: crate::ops::FnMut
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().map(|x| 2 * x);
+ ///
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), Some(6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If you're doing some sort of side effect, prefer [`for`] to `map()`:
+ ///
+ /// ```
+ /// # #![allow(unused_must_use)]
+ /// // don't do this:
+ /// (0..5).map(|x| println!("{}", x));
+ ///
+ /// // it won't even execute, as it is lazy. Rust will warn you about this.
+ ///
+ /// // Instead, use for:
+ /// for x in 0..5 {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn map<B, F>(self, f: F) -> Map<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> B,
+ {
+ // The Map adapter stores `f` and applies it per element on demand.
+ Map::new(self, f)
+ }
+
+ /// Calls a closure on each element of an iterator.
+ ///
+ /// This is equivalent to using a [`for`] loop on the iterator, although
+ /// `break` and `continue` are not possible from a closure. It's generally
+ /// more idiomatic to use a `for` loop, but `for_each` may be more legible
+ /// when processing items at the end of longer iterator chains. In some
+ /// cases `for_each` may also be faster than a loop, because it will use
+ /// internal iteration on adaptors like `Chain`.
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::sync::mpsc::channel;
+ ///
+ /// let (tx, rx) = channel();
+ /// (0..5).map(|x| x * 2 + 1)
+ /// .for_each(move |x| tx.send(x).unwrap());
+ ///
+ /// let v: Vec<_> = rx.iter().collect();
+ /// assert_eq!(v, vec![1, 3, 5, 7, 9]);
+ /// ```
+ ///
+ /// For such a small example, a `for` loop may be cleaner, but `for_each`
+ /// might be preferable to keep a functional style with longer iterators:
+ ///
+ /// ```
+ /// (0..5).flat_map(|x| x * 100 .. x * 110)
+ /// .enumerate()
+ /// .filter(|&(i, x)| (i + x) % 3 == 0)
+ /// .for_each(|(i, x)| println!("{}:{}", i, x));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_for_each", since = "1.21.0")]
+ fn for_each<F>(self, f: F)
+ where
+ Self: Sized,
+ F: FnMut(Self::Item),
+ {
+ #[inline]
+ // Adapts `f` to the `(accumulator, item)` shape that `fold` expects,
+ // using `()` as a throwaway accumulator.
+ fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
+ move |(), item| f(item)
+ }
+
+ // Driving `fold` with a unit accumulator consumes the whole iterator,
+ // which lets adaptors use their (potentially faster) internal iteration.
+ self.fold((), call(f));
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element
+ /// should be yielded.
+ ///
+ /// Given an element the closure must return `true` or `false`. The returned
+ /// iterator will yield only the elements for which the closure returns
+ /// true.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [0i32, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|x| x.is_positive());
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `filter()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure is a double reference:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|x| **x > 1); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// It's common to instead use destructuring on the argument to strip away
+ /// one:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|&x| *x > 1); // both & and *
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// or both:
+ ///
+ /// ```
+ /// let a = [0, 1, 2];
+ ///
+ /// let mut iter = a.iter().filter(|&&x| x > 1); // two &s
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// of these layers.
+ ///
+ /// Note that `iter.filter(f).next()` is equivalent to `iter.find(f)`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn filter<P>(self, predicate: P) -> Filter<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ // The Filter adapter stores the predicate and applies it lazily,
+ // passing each item by reference so ownership is preserved.
+ Filter::new(self, predicate)
+ }
+
+ /// Creates an iterator that both filters and maps.
+ ///
+ /// The returned iterator yields only the `value`s for which the supplied
+ /// closure returns `Some(value)`.
+ ///
+ /// `filter_map` can be used to make chains of [`filter`] and [`map`] more
+ /// concise. The example below shows how a `map().filter().map()` can be
+ /// shortened to a single call to `filter_map`.
+ ///
+ /// [`filter`]: Iterator::filter
+ /// [`map`]: Iterator::map
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = ["1", "two", "NaN", "four", "5"];
+ ///
+ /// let mut iter = a.iter().filter_map(|s| s.parse().ok());
+ ///
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(5));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Here's the same example, but with [`filter`] and [`map`]:
+ ///
+ /// ```
+ /// let a = ["1", "two", "NaN", "four", "5"];
+ /// let mut iter = a.iter().map(|s| s.parse()).filter(|s| s.is_ok()).map(|s| s.unwrap());
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(5));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// [`Option<T>`]: Option
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> Option<B>,
+ {
+ // One adapter does both jobs: `f` maps each item to Option<B>, and
+ // only the Some values are yielded.
+ FilterMap::new(self, f)
+ }
+
+ /// Creates an iterator which gives the current iteration count as well as
+ /// the next value.
+ ///
+ /// The iterator returned yields pairs `(i, val)`, where `i` is the
+ /// current index of iteration and `val` is the value returned by the
+ /// iterator.
+ ///
+ /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a
+ /// different sized integer, the [`zip`] function provides similar
+ /// functionality.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so enumerating more than
+ /// [`usize::MAX`] elements either produces the wrong result or panics. If
+ /// debug assertions are enabled, a panic is guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// The returned iterator might panic if the to-be-returned index would
+ /// overflow a [`usize`].
+ ///
+ /// [`usize`]: type@usize
+ /// [`usize::MAX`]: crate::usize::MAX
+ /// [`zip`]: Iterator::zip
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = ['a', 'b', 'c'];
+ ///
+ /// let mut iter = a.iter().enumerate();
+ ///
+ /// assert_eq!(iter.next(), Some((0, &'a')));
+ /// assert_eq!(iter.next(), Some((1, &'b')));
+ /// assert_eq!(iter.next(), Some((2, &'c')));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn enumerate(self) -> Enumerate<Self>
+ where
+ Self: Sized,
+ {
+ // The Enumerate adapter maintains the usize counter internally.
+ Enumerate::new(self)
+ }
+
+ /// Creates an iterator which can use [`peek`] to look at the next element of
+ /// the iterator without consuming it.
+ ///
+ /// Adds a [`peek`] method to an iterator. See its documentation for
+ /// more information.
+ ///
+ /// Note that the underlying iterator is still advanced when [`peek`] is
+ /// called for the first time: In order to retrieve the next element,
+ /// [`next`] is called on the underlying iterator, hence any side effects (i.e.
+ /// anything other than fetching the next value) of the [`next`] method
+ /// will occur.
+ ///
+ /// [`peek`]: Peekable::peek
+ /// [`next`]: Iterator::next
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let xs = [1, 2, 3];
+ ///
+ /// let mut iter = xs.iter().peekable();
+ ///
+ /// // peek() lets us see into the future
+ /// assert_eq!(iter.peek(), Some(&&1));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// assert_eq!(iter.next(), Some(&2));
+ ///
+ /// // we can peek() multiple times, the iterator won't advance
+ /// assert_eq!(iter.peek(), Some(&&3));
+ /// assert_eq!(iter.peek(), Some(&&3));
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // after the iterator is finished, so is peek()
+ /// assert_eq!(iter.peek(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn peekable(self) -> Peekable<Self>
+ where
+ Self: Sized,
+ {
+ // Peekable buffers at most one element to support peek().
+ Peekable::new(self)
+ }
+
+ /// Creates an iterator that [`skip`]s elements based on a predicate.
+ ///
+ /// [`skip`]: Iterator::skip
+ ///
+ /// `skip_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and ignore elements
+ /// until it returns `false`.
+ ///
+ /// After `false` is returned, `skip_while()`'s job is over, and the
+ /// rest of the elements are yielded.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [-1i32, 0, 1];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| x.is_negative());
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `skip_while()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure is a double reference:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| **x < 0); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial `false`:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1, -2];
+ ///
+ /// let mut iter = a.iter().skip_while(|x| **x < 0);
+ ///
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// // while this would have been false, since we already got a false,
+ /// // skip_while() isn't used any more
+ /// assert_eq!(iter.next(), Some(&-2));
+ ///
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ // The SkipWhile adapter stops consulting the predicate after its
+ // first `false`, as documented above.
+ SkipWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that yields elements based on a predicate.
+ ///
+ /// `take_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and yield elements
+ /// while it returns `true`.
+ ///
+ /// After `false` is returned, `take_while()`'s job is over, and the
+ /// rest of the elements are ignored.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [-1i32, 0, 1];
+ ///
+ /// let mut iter = a.iter().take_while(|x| x.is_negative());
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because the closure passed to `take_while()` takes a reference, and many
+ /// iterators iterate over references, this leads to a possibly confusing
+ /// situation, where the type of the closure is a double reference:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1];
+ ///
+ /// let mut iter = a.iter().take_while(|x| **x < 0); // need two *s!
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial `false`:
+ ///
+ /// ```
+ /// let a = [-1, 0, 1, -2];
+ ///
+ /// let mut iter = a.iter().take_while(|x| **x < 0);
+ ///
+ /// assert_eq!(iter.next(), Some(&-1));
+ ///
+ /// // We have more elements that are less than zero, but since we already
+ /// // got a false, take_while() isn't used any more
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Because `take_while()` needs to look at the value in order to see if it
+ /// should be included or not, consuming iterators will see that it is
+ /// removed:
+ ///
+ /// ```
+ /// let a = [1, 2, 3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// let result: Vec<i32> = iter.by_ref()
+ /// .take_while(|n| **n != 3)
+ /// .cloned()
+ /// .collect();
+ ///
+ /// assert_eq!(result, &[1, 2]);
+ ///
+ /// let result: Vec<i32> = iter.cloned().collect();
+ ///
+ /// assert_eq!(result, &[4]);
+ /// ```
+ ///
+ /// The `3` is no longer there, because it was consumed in order to see if
+ /// the iteration should stop, but wasn't placed back into the iterator.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ // The TakeWhile adapter ends the iteration on the predicate's first
+ // `false`; the rejected element is consumed, not yielded (see above).
+ TakeWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that both yields elements based on a predicate and maps.
+ ///
+ /// `map_while()` takes a closure as an argument. It will call this
+ /// closure on each element of the iterator, and yield elements
+ /// while it returns [`Some(_)`][`Some`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(iter_map_while)]
+ /// let a = [-1i32, 4, 0, 1];
+ ///
+ /// let mut iter = a.iter().map_while(|x| 16i32.checked_div(*x));
+ ///
+ /// assert_eq!(iter.next(), Some(-16));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Here's the same example, but with [`take_while`] and [`map`]:
+ ///
+ /// [`take_while`]: Iterator::take_while
+ /// [`map`]: Iterator::map
+ ///
+ /// ```
+ /// let a = [-1i32, 4, 0, 1];
+ ///
+ /// let mut iter = a.iter()
+ /// .map(|x| 16i32.checked_div(*x))
+ /// .take_while(|x| x.is_some())
+ /// .map(|x| x.unwrap());
+ ///
+ /// assert_eq!(iter.next(), Some(-16));
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// Stopping after an initial [`None`]:
+ ///
+ /// ```
+ /// #![feature(iter_map_while)]
+ /// use std::convert::TryFrom;
+ ///
+ /// let a = [0, 1, 2, -3, 4, 5, -6];
+ ///
+ /// let iter = a.iter().map_while(|x| u32::try_from(*x).ok());
+ /// let vec = iter.collect::<Vec<_>>();
+ ///
+ /// // We have more elements which could fit in u32 (4, 5), but `map_while` returned `None` for `-3`
+ /// // (as the `predicate` returned `None`) and `collect` stops at the first `None` encountered.
+ /// assert_eq!(vec, vec![0, 1, 2]);
+ /// ```
+ ///
+ /// Because `map_while()` needs to look at the value in order to see if it
+ /// should be included or not, consuming iterators will see that it is
+ /// removed:
+ ///
+ /// ```
+ /// #![feature(iter_map_while)]
+ /// use std::convert::TryFrom;
+ ///
+ /// let a = [1, 2, -3, 4];
+ /// let mut iter = a.iter();
+ ///
+ /// let result: Vec<u32> = iter.by_ref()
+ /// .map_while(|n| u32::try_from(*n).ok())
+ /// .collect();
+ ///
+ /// assert_eq!(result, &[1, 2]);
+ ///
+ /// let result: Vec<i32> = iter.cloned().collect();
+ ///
+ /// assert_eq!(result, &[4]);
+ /// ```
+ ///
+ /// The `-3` is no longer there, because it was consumed in order to see if
+ /// the iteration should stop, but wasn't placed back into the iterator.
+ ///
+ /// Note that unlike [`take_while`] this iterator is **not** fused.
+ /// It is also not specified what this iterator returns after the first `None` is returned.
+ /// If you need a fused iterator, use [`fuse`].
+ ///
+ /// [`fuse`]: Iterator::fuse
+ #[inline]
+ #[unstable(feature = "iter_map_while", reason = "recently added", issue = "68537")]
+ fn map_while<B, P>(self, predicate: P) -> MapWhile<Self, P>
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> Option<B>,
+ {
+ // MapWhile combines mapping and early termination: `predicate` both
+ // transforms the item and signals the end by returning None.
+ MapWhile::new(self, predicate)
+ }
+
+ /// Creates an iterator that skips the first `n` elements.
+ ///
+ /// After they have been consumed, the rest of the elements are yielded.
+ /// Rather than overriding this method directly, instead override the `nth` method.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().skip(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn skip(self, n: usize) -> Skip<Self>
+ where
+ Self: Sized,
+ {
+ // The Skip adapter discards the first `n` items lazily, on first use.
+ Skip::new(self, n)
+ }
+
+ /// Creates an iterator that yields its first `n` elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().take(2);
+ ///
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// `take()` is often used with an infinite iterator, to make it finite:
+ ///
+ /// ```
+ /// let mut iter = (0..).take(3);
+ ///
+ /// assert_eq!(iter.next(), Some(0));
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// If less than `n` elements are available,
+ /// `take` will limit itself to the size of the underlying iterator:
+ ///
+ /// ```
+ /// let v = vec![1, 2];
+ /// let mut iter = v.into_iter().take(5);
+ /// assert_eq!(iter.next(), Some(1));
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn take(self, n: usize) -> Take<Self>
+ where
+ Self: Sized,
+ {
+ // The Take adapter counts down from `n` and then stops yielding.
+ Take::new(self, n)
+ }
+
+ /// An iterator adaptor similar to [`fold`] that holds internal state and
+ /// produces a new iterator.
+ ///
+ /// [`fold`]: Iterator::fold
+ ///
+ /// `scan()` takes two arguments: an initial value which seeds the internal
+ /// state, and a closure with two arguments, the first being a mutable
+ /// reference to the internal state and the second an iterator element.
+ /// The closure can assign to the internal state to share state between
+ /// iterations.
+ ///
+ /// On iteration, the closure will be applied to each element of the
+ /// iterator and the return value from the closure, an [`Option`], is
+ /// yielded by the iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().scan(1, |state, &x| {
+ /// // each iteration, we'll multiply the state by the element
+ /// *state = *state * x;
+ ///
+ /// // then, we'll yield the negation of the state
+ /// Some(-*state)
+ /// });
+ ///
+ /// assert_eq!(iter.next(), Some(-1));
+ /// assert_eq!(iter.next(), Some(-2));
+ /// assert_eq!(iter.next(), Some(-6));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
+ where
+ Self: Sized,
+ F: FnMut(&mut St, Self::Item) -> Option<B>,
+ {
+ // The Scan adapter owns the state seeded by `initial_state` and
+ // passes `f` a mutable reference to it on every element.
+ Scan::new(self, initial_state, f)
+ }
+
+ /// Creates an iterator that works like map, but flattens nested structure.
+ ///
+ /// The [`map`] adapter is very useful, but only when the closure
+ /// argument produces values. If it produces an iterator instead, there's
+ /// an extra layer of indirection. `flat_map()` will remove this extra layer
+ /// on its own.
+ ///
+ /// You can think of `flat_map(f)` as the semantic equivalent
+ /// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`.
+ ///
+ /// Another way of thinking about `flat_map()`: [`map`]'s closure returns
+ /// one item for each element, and `flat_map()`'s closure returns an
+ /// iterator for each element.
+ ///
+ /// [`map`]: Iterator::map
+ /// [`flatten`]: Iterator::flatten
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .flat_map(|s| s.chars())
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
+ where
+ Self: Sized,
+ U: IntoIterator,
+ F: FnMut(Self::Item) -> U,
+ {
+ // FlatMap fuses the map and flatten steps into a single adapter.
+ FlatMap::new(self, f)
+ }
+
+ /// Creates an iterator that flattens nested structure.
+ ///
+ /// This is useful when you have an iterator of iterators or an iterator of
+ /// things that can be turned into iterators and you want to remove one
+ /// level of indirection.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let data = vec![vec![1, 2, 3, 4], vec![5, 6]];
+ /// let flattened = data.into_iter().flatten().collect::<Vec<u8>>();
+ /// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]);
+ /// ```
+ ///
+ /// Mapping and then flattening:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .map(|s| s.chars())
+ /// .flatten()
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
+ ///
+ /// You can also rewrite this in terms of [`flat_map()`], which is preferable
+ /// in this case since it conveys intent more clearly:
+ ///
+ /// ```
+ /// let words = ["alpha", "beta", "gamma"];
+ ///
+ /// // chars() returns an iterator
+ /// let merged: String = words.iter()
+ /// .flat_map(|s| s.chars())
+ /// .collect();
+ /// assert_eq!(merged, "alphabetagamma");
+ /// ```
+ ///
+ /// Flattening once only removes one level of nesting:
+ ///
+ /// ```
+ /// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]];
+ ///
+ /// let d2 = d3.iter().flatten().collect::<Vec<_>>();
+ /// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]);
+ ///
+ /// let d1 = d3.iter().flatten().flatten().collect::<Vec<_>>();
+ /// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]);
+ /// ```
+ ///
+ /// Here we see that `flatten()` does not perform a "deep" flatten.
+ /// Instead, only one level of nesting is removed. That is, if you
+ /// `flatten()` a three-dimensional array the result will be
+ /// two-dimensional and not one-dimensional. To get a one-dimensional
+ /// structure, you have to `flatten()` again.
+ ///
+ /// [`flat_map()`]: Iterator::flat_map
+ #[inline]
+ #[stable(feature = "iterator_flatten", since = "1.29.0")]
+ fn flatten(self) -> Flatten<Self>
+ where
+ Self: Sized,
+ Self::Item: IntoIterator,
+ {
+ // Requires Self::Item: IntoIterator; removes exactly one nesting level.
+ Flatten::new(self)
+ }
+
+ /// Creates an iterator which ends after the first [`None`].
+ ///
+ /// After an iterator returns [`None`], future calls may or may not yield
+ /// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a
+ /// [`None`] is given, it will always return [`None`] forever.
+ ///
+ /// [`Some(T)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // an iterator which alternates between Some and None
+ /// struct Alternate {
+ /// state: i32,
+ /// }
+ ///
+ /// impl Iterator for Alternate {
+ /// type Item = i32;
+ ///
+ /// fn next(&mut self) -> Option<i32> {
+ /// let val = self.state;
+ /// self.state = self.state + 1;
+ ///
+ /// // if it's even, Some(i32), else None
+ /// if val % 2 == 0 {
+ /// Some(val)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ ///
+ /// let mut iter = Alternate { state: 0 };
+ ///
+ /// // we can see our iterator going back and forth
+ /// assert_eq!(iter.next(), Some(0));
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), Some(2));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // however, once we fuse it...
+ /// let mut iter = iter.fuse();
+ ///
+ /// assert_eq!(iter.next(), Some(4));
+ /// assert_eq!(iter.next(), None);
+ ///
+ /// // it will always return `None` after the first time.
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fuse(self) -> Fuse<Self>
+ where
+ Self: Sized,
+ {
+ // The Fuse adapter guarantees None forever after the first None.
+ Fuse::new(self)
+ }
+
+ /// Does something with each element of an iterator, passing the value on.
+ ///
+ /// When using iterators, you'll often chain several of them together.
+ /// While working on such code, you might want to check out what's
+ /// happening at various parts in the pipeline. To do that, insert
+ /// a call to `inspect()`.
+ ///
+ /// It's more common for `inspect()` to be used as a debugging tool than to
+ /// exist in your final code, but applications may find it useful in certain
+ /// situations when errors need to be logged before being discarded.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 4, 2, 3];
+ ///
+ /// // this iterator sequence is complex.
+ /// let sum = a.iter()
+ /// .cloned()
+ /// .filter(|x| x % 2 == 0)
+ /// .fold(0, |sum, i| sum + i);
+ ///
+ /// println!("{}", sum);
+ ///
+ /// // let's add some inspect() calls to investigate what's happening
+ /// let sum = a.iter()
+ /// .cloned()
+ /// .inspect(|x| println!("about to filter: {}", x))
+ /// .filter(|x| x % 2 == 0)
+ /// .inspect(|x| println!("made it through filter: {}", x))
+ /// .fold(0, |sum, i| sum + i);
+ ///
+ /// println!("{}", sum);
+ /// ```
+ ///
+ /// This will print:
+ ///
+ /// ```text
+ /// 6
+ /// about to filter: 1
+ /// about to filter: 4
+ /// made it through filter: 4
+ /// about to filter: 2
+ /// made it through filter: 2
+ /// about to filter: 3
+ /// 6
+ /// ```
+ ///
+ /// Logging errors before discarding them:
+ ///
+ /// ```
+ /// let lines = ["1", "2", "a"];
+ ///
+ /// let sum: i32 = lines
+ /// .iter()
+ /// .map(|line| line.parse::<i32>())
+ /// .inspect(|num| {
+ /// if let Err(ref e) = *num {
+ /// println!("Parsing error: {}", e);
+ /// }
+ /// })
+ /// .filter_map(Result::ok)
+ /// .sum();
+ ///
+ /// println!("Sum: {}", sum);
+ /// ```
+ ///
+ /// This will print:
+ ///
+ /// ```text
+ /// Parsing error: invalid digit found in string
+ /// Sum: 3
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn inspect<F>(self, f: F) -> Inspect<Self, F>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item),
+ {
+ // `f` only borrows each item, so the element is passed through unchanged.
+ Inspect::new(self, f)
+ }
+
+ /// Borrows an iterator, rather than consuming it.
+ ///
+ /// This is useful to allow applying iterator adaptors while still
+ /// retaining ownership of the original iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let iter = a.iter();
+ ///
+ /// let sum: i32 = iter.take(5).fold(0, |acc, i| acc + i);
+ ///
+ /// assert_eq!(sum, 6);
+ ///
+ /// // if we try to use iter again, it won't work. The following line
+ /// // gives "error: use of moved value: `iter`"
+ /// // assert_eq!(iter.next(), None);
+ ///
+ /// // let's try that again
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// // instead, we add in a .by_ref()
+ /// let sum: i32 = iter.by_ref().take(2).fold(0, |acc, i| acc + i);
+ ///
+ /// assert_eq!(sum, 3);
+ ///
+ /// // now this is just fine:
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn by_ref(&mut self) -> &mut Self
+ where
+ Self: Sized,
+ {
+ // `&mut I` is itself an Iterator, so returning the reference is enough;
+ // adaptors consume the reference while the caller keeps the iterator.
+ self
+ }
+
+ /// Transforms an iterator into a collection.
+ ///
+ /// `collect()` can take anything iterable, and turn it into a relevant
+ /// collection. This is one of the more powerful methods in the standard
+ /// library, used in a variety of contexts.
+ ///
+ /// The most basic pattern in which `collect()` is used is to turn one
+ /// collection into another. You take a collection, call [`iter`] on it,
+ /// do a bunch of transformations, and then `collect()` at the end.
+ ///
+ /// `collect()` can also create instances of types that are not typical
+ /// collections. For example, a [`String`] can be built from [`char`]s,
+ /// and an iterator of [`Result<T, E>`][`Result`] items can be collected
+ /// into `Result<Collection<T>, E>`. See the examples below for more.
+ ///
+ /// Because `collect()` is so general, it can cause problems with type
+ /// inference. As such, `collect()` is one of the few times you'll see
+ /// the syntax affectionately known as the 'turbofish': `::<>`. This
+ /// helps the inference algorithm understand specifically which collection
+ /// you're trying to collect into.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled: Vec<i32> = a.iter()
+ /// .map(|&x| x * 2)
+ /// .collect();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Note that we needed the `: Vec<i32>` on the left-hand side. This is because
+ /// we could collect into, for example, a [`VecDeque<T>`] instead:
+ ///
+ /// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled: VecDeque<i32> = a.iter().map(|&x| x * 2).collect();
+ ///
+ /// assert_eq!(2, doubled[0]);
+ /// assert_eq!(4, doubled[1]);
+ /// assert_eq!(6, doubled[2]);
+ /// ```
+ ///
+ /// Using the 'turbofish' instead of annotating `doubled`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<i32>>();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Because `collect()` only cares about what you're collecting into, you can
+ /// still use a partial type hint, `_`, with the turbofish:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<_>>();
+ ///
+ /// assert_eq!(vec![2, 4, 6], doubled);
+ /// ```
+ ///
+ /// Using `collect()` to make a [`String`]:
+ ///
+ /// ```
+ /// let chars = ['g', 'd', 'k', 'k', 'n'];
+ ///
+ /// let hello: String = chars.iter()
+ /// .map(|&x| x as u8)
+ /// .map(|x| (x + 1) as char)
+ /// .collect();
+ ///
+ /// assert_eq!("hello", hello);
+ /// ```
+ ///
+ /// If you have a list of [`Result<T, E>`][`Result`]s, you can use `collect()` to
+ /// see if any of them failed:
+ ///
+ /// ```
+ /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")];
+ ///
+ /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
+ ///
+ /// // gives us the first error
+ /// assert_eq!(Err("nope"), result);
+ ///
+ /// let results = [Ok(1), Ok(3)];
+ ///
+ /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
+ ///
+ /// // gives us the list of answers
+ /// assert_eq!(Ok(vec![1, 3]), result);
+ /// ```
+ ///
+ /// [`iter`]: Iterator::next
+ /// [`String`]: ../../std/string/struct.String.html
+ /// [`char`]: type@char
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
+ fn collect<B: FromIterator<Self::Item>>(self) -> B
+ where
+     Self: Sized,
+ {
+     // Hand the whole iterator to the target collection's `FromIterator`
+     // impl; the collection decides how to build itself.
+     B::from_iter(self)
+ }
+
+ /// Consumes an iterator, creating two collections from it.
+ ///
+ /// The predicate passed to `partition()` can return `true`, or `false`.
+ /// `partition()` returns a pair, all of the elements for which it returned
+ /// `true`, and all of the elements for which it returned `false`.
+ ///
+ /// See also [`is_partitioned()`] and [`partition_in_place()`].
+ ///
+ /// [`is_partitioned()`]: Iterator::is_partitioned
+ /// [`partition_in_place()`]: Iterator::partition_in_place
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let (even, odd): (Vec<i32>, Vec<i32>) = a
+ /// .iter()
+ /// .partition(|&n| n % 2 == 0);
+ ///
+ /// assert_eq!(even, vec![2]);
+ /// assert_eq!(odd, vec![1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn partition<B, F>(self, f: F) -> (B, B)
+ where
+ Self: Sized,
+ B: Default + Extend<Self::Item>,
+ F: FnMut(&Self::Item) -> bool,
+ {
+ // Closure "factory": keeping the routing closure generic only over the
+ // item/collection types (not `Self`) limits monomorphization bloat.
+ #[inline]
+ fn extend<'a, T, B: Extend<T>>(
+ mut f: impl FnMut(&T) -> bool + 'a,
+ left: &'a mut B,
+ right: &'a mut B,
+ ) -> impl FnMut((), T) + 'a {
+ move |(), x| {
+ // Route each element to one of the two collections based on the
+ // predicate; `extend_one` pushes a single item.
+ if f(&x) {
+ left.extend_one(x);
+ } else {
+ right.extend_one(x);
+ }
+ }
+ }
+
+ let mut left: B = Default::default();
+ let mut right: B = Default::default();
+
+ // Drive iteration via `fold` with a unit accumulator so iterators with
+ // specialized `fold` implementations benefit.
+ self.fold((), extend(f, &mut left, &mut right));
+
+ (left, right)
+ }
+
+ /// Reorders the elements of this iterator *in-place* according to the given predicate,
+ /// such that all those that return `true` precede all those that return `false`.
+ /// Returns the number of `true` elements found.
+ ///
+ /// The relative order of partitioned items is not maintained.
+ ///
+ /// See also [`is_partitioned()`] and [`partition()`].
+ ///
+ /// [`is_partitioned()`]: Iterator::is_partitioned
+ /// [`partition()`]: Iterator::partition
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(iter_partition_in_place)]
+ ///
+ /// let mut a = [1, 2, 3, 4, 5, 6, 7];
+ ///
+ /// // Partition in-place between evens and odds
+ /// let i = a.iter_mut().partition_in_place(|&n| n % 2 == 0);
+ ///
+ /// assert_eq!(i, 3);
+ /// assert!(a[..i].iter().all(|&n| n % 2 == 0)); // evens
+ /// assert!(a[i..].iter().all(|&n| n % 2 == 1)); // odds
+ /// ```
+ #[unstable(feature = "iter_partition_in_place", reason = "new API", issue = "62543")]
+ fn partition_in_place<'a, T: 'a, P>(mut self, ref mut predicate: P) -> usize
+ where
+ Self: Sized + DoubleEndedIterator<Item = &'a mut T>,
+ P: FnMut(&T) -> bool,
+ {
+ // FIXME: should we worry about the count overflowing? The only way to have more than
+ // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition...
+
+ // These closure "factory" functions exist to avoid genericity in `Self`.
+
+ #[inline]
+ fn is_false<'a, T>(
+ predicate: &'a mut impl FnMut(&T) -> bool,
+ true_count: &'a mut usize,
+ ) -> impl FnMut(&&mut T) -> bool + 'a {
+ move |x| {
+ // Count every `true` seen during the forward scan, so elements
+ // that never need swapping are still tallied.
+ let p = predicate(&**x);
+ *true_count += p as usize;
+ !p
+ }
+ }
+
+ #[inline]
+ fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ {
+ move |x| predicate(&**x)
+ }
+
+ // Repeatedly find the first `false` and swap it with the last `true`.
+ let mut true_count = 0;
+ while let Some(head) = self.find(is_false(predicate, &mut true_count)) {
+ if let Some(tail) = self.rfind(is_true(predicate)) {
+ crate::mem::swap(head, tail);
+ // The swapped-in `true` element was counted by neither closure,
+ // so account for it here.
+ true_count += 1;
+ } else {
+ // No `true` element remains to the right: fully partitioned.
+ break;
+ }
+ }
+ true_count
+ }
+
+ /// Checks if the elements of this iterator are partitioned according to the given predicate,
+ /// such that all those that return `true` precede all those that return `false`.
+ ///
+ /// See also [`partition()`] and [`partition_in_place()`].
+ ///
+ /// [`partition()`]: Iterator::partition
+ /// [`partition_in_place()`]: Iterator::partition_in_place
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(iter_is_partitioned)]
+ ///
+ /// assert!("Iterator".chars().is_partitioned(char::is_uppercase));
+ /// assert!(!"IntoIterator".chars().is_partitioned(char::is_uppercase));
+ /// ```
+ #[unstable(feature = "iter_is_partitioned", reason = "new API", issue = "62544")]
+ fn is_partitioned<P>(mut self, mut predicate: P) -> bool
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> bool,
+ {
+ // Either all items test `true`, or the first clause stops at `false`
+ // and we check that there are no more `true` items after that.
+ // (`all` short-circuits at the first `false`, leaving `self` positioned
+ // just past it, so `any` only scans the remaining tail.)
+ self.all(&mut predicate) || !self.any(predicate)
+ }
+
+ /// An iterator method that applies a function as long as it returns
+ /// successfully, producing a single, final value.
+ ///
+ /// `try_fold()` takes two arguments: an initial value, and a closure with
+ /// two arguments: an 'accumulator', and an element. The closure either
+ /// returns successfully, with the value that the accumulator should have
+ /// for the next iteration, or it returns failure, with an error value that
+ /// is propagated back to the caller immediately (short-circuiting).
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call. If applying the closure succeeded against every element of the
+ /// iterator, `try_fold()` returns the final accumulator as success.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// # Note to Implementors
+ ///
+ /// Several of the other (forward) methods have default implementations in
+ /// terms of this one, so try to implement this explicitly if it can
+ /// do something better than the default `for` loop implementation.
+ ///
+ /// In particular, try to have this call `try_fold()` on the internal parts
+ /// from which this iterator is composed. If multiple calls are needed,
+ /// the `?` operator may be convenient for chaining the accumulator value
+ /// along, but beware any invariants that need to be upheld before those
+ /// early returns. This is a `&mut self` method, so iteration needs to be
+ /// resumable after hitting an error here.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the checked sum of all of the elements of the array
+ /// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x));
+ ///
+ /// assert_eq!(sum, Some(6));
+ /// ```
+ ///
+ /// Short-circuiting:
+ ///
+ /// ```
+ /// let a = [10, 20, 30, 100, 40, 50];
+ /// let mut it = a.iter();
+ ///
+ /// // This sum overflows when adding the 100 element
+ /// let sum = it.try_fold(0i8, |acc, &x| acc.checked_add(x));
+ /// assert_eq!(sum, None);
+ ///
+ /// // Because it short-circuited, the remaining elements are still
+ /// // available through the iterator.
+ /// assert_eq!(it.len(), 2);
+ /// assert_eq!(it.next(), Some(&40));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Ok = B>,
+ {
+ let mut accum = init;
+ while let Some(x) = self.next() {
+ // `?` propagates failure immediately; because `self` is only
+ // borrowed, the caller can resume iterating after a short-circuit.
+ accum = f(accum, x)?;
+ }
+ // Wrap the final accumulator back into the success variant of `R`.
+ try { accum }
+ }
+
+ /// An iterator method that applies a fallible function to each item in the
+ /// iterator, stopping at the first error and returning that error.
+ ///
+ /// This can also be thought of as the fallible form of [`for_each()`]
+ /// or as the stateless version of [`try_fold()`].
+ ///
+ /// [`for_each()`]: Iterator::for_each
+ /// [`try_fold()`]: Iterator::try_fold
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fs::rename;
+ /// use std::io::{stdout, Write};
+ /// use std::path::Path;
+ ///
+ /// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"];
+ ///
+ /// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{}", x));
+ /// assert!(res.is_ok());
+ ///
+ /// let mut it = data.iter().cloned();
+ /// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old")));
+ /// assert!(res.is_err());
+ /// // It short-circuited, so the remaining items are still in the iterator:
+ /// assert_eq!(it.next(), Some("stale_bread.json"));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_try_fold", since = "1.27.0")]
+ fn try_for_each<F, R>(&mut self, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> R,
+ R: Try<Ok = ()>,
+ {
+ // Closure "factory": generic only over the item/result types, not
+ // `Self`, to limit monomorphization bloat.
+ #[inline]
+ fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
+ move |(), x| f(x)
+ }
+
+ // A unit accumulator turns `try_fold` into a fallible for-each.
+ self.try_fold((), call(f))
+ }
+
+ /// An iterator method that applies a function, producing a single, final value.
+ ///
+ /// `fold()` takes two arguments: an initial value, and a closure with two
+ /// arguments: an 'accumulator', and an element. The closure returns the value that
+ /// the accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first
+ /// call.
+ ///
+ /// After applying this closure to every element of the iterator, `fold()`
+ /// returns the accumulator.
+ ///
+ /// This operation is sometimes called 'reduce' or 'inject'.
+ ///
+ /// Folding is useful whenever you have a collection of something, and want
+ /// to produce a single value from it.
+ ///
+ /// Note: `fold()`, and similar methods that traverse the entire iterator,
+ /// may not terminate for infinite iterators, even on traits for which a
+ /// result is determinable in finite time.
+ ///
+ /// # Note to Implementors
+ ///
+ /// Several of the other (forward) methods have default implementations in
+ /// terms of this one, so try to implement this explicitly if it can
+ /// do something better than the default `for` loop implementation.
+ ///
+ /// In particular, try to have this call `fold()` on the internal parts
+ /// from which this iterator is composed.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// // the sum of all of the elements of the array
+ /// let sum = a.iter().fold(0, |acc, x| acc + x);
+ ///
+ /// assert_eq!(sum, 6);
+ /// ```
+ ///
+ /// Let's walk through each step of the iteration here:
+ ///
+ /// | element | acc | x | result |
+ /// |---------|-----|---|--------|
+ /// | | 0 | | |
+ /// | 1 | 0 | 1 | 1 |
+ /// | 2 | 1 | 2 | 3 |
+ /// | 3 | 3 | 3 | 6 |
+ ///
+ /// And so, our final result, `6`.
+ ///
+ /// It's common for people who haven't used iterators a lot to
+ /// use a `for` loop with a list of things to build up a result. Those
+ /// can be turned into `fold()`s:
+ ///
+ /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
+ ///
+ /// ```
+ /// let numbers = [1, 2, 3, 4, 5];
+ ///
+ /// let mut result = 0;
+ ///
+ /// // for loop:
+ /// for i in &numbers {
+ /// result = result + i;
+ /// }
+ ///
+ /// // fold:
+ /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x);
+ ///
+ /// // they're the same
+ /// assert_eq!(result, result2);
+ /// ```
+ #[doc(alias = "reduce")]
+ #[doc(alias = "inject")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+     Self: Sized,
+     F: FnMut(B, Self::Item) -> B,
+ {
+     // Thread the accumulator through every element, in order, until the
+     // iterator is exhausted; the final accumulator is the result.
+     let mut acc = init;
+     for item in self {
+         acc = f(acc, item);
+     }
+     acc
+ }
+
+ /// The same as [`fold()`], but uses the first element in the
+ /// iterator as the initial value, folding every subsequent element into it.
+ /// If the iterator is empty, return [`None`]; otherwise, return the result
+ /// of the fold.
+ ///
+ /// [`fold()`]: Iterator::fold
+ ///
+ /// # Example
+ ///
+ /// Find the maximum value:
+ ///
+ /// ```
+ /// #![feature(iterator_fold_self)]
+ ///
+ /// fn find_max<I>(iter: I) -> Option<I::Item>
+ /// where I: Iterator,
+ /// I::Item: Ord,
+ /// {
+ /// iter.fold_first(|a, b| {
+ /// if a >= b { a } else { b }
+ /// })
+ /// }
+ /// let a = [10, 20, 5, -23, 0];
+ /// let b: [u32; 0] = [];
+ ///
+ /// assert_eq!(find_max(a.iter()), Some(&20));
+ /// assert_eq!(find_max(b.iter()), None);
+ /// ```
+ #[inline]
+ #[unstable(feature = "iterator_fold_self", issue = "68125")]
+ fn fold_first<F>(mut self, f: F) -> Option<Self::Item>
+ where
+     Self: Sized,
+     F: FnMut(Self::Item, Self::Item) -> Self::Item,
+ {
+     // Seed a regular `fold` with the first element; an empty iterator
+     // has no seed and therefore yields `None`.
+     match self.next() {
+         Some(first) => Some(self.fold(first, f)),
+         None => None,
+     }
+ }
+
+ /// Tests if every element of the iterator matches a predicate.
+ ///
+ /// `all()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if they all return
+ /// `true`, then so does `all()`. If any of them return `false`, it
+ /// returns `false`.
+ ///
+ /// `all()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `false`, given that no matter what else happens,
+ /// the result will also be `false`.
+ ///
+ /// An empty iterator returns `true`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(a.iter().all(|&x| x > 0));
+ ///
+ /// assert!(!a.iter().all(|&x| x > 2));
+ /// ```
+ ///
+ /// Stopping at the first `false`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert!(!iter.all(|&x| x != 2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn all<F>(&mut self, f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ // Closure "factory" avoids genericity in `Self` (less monomorphization).
+ #[inline]
+ fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
+ move |(), x| {
+ // Short-circuit (break) on the first `false`.
+ if f(x) { ControlFlow::CONTINUE } else { ControlFlow::BREAK }
+ }
+ }
+ // `all` holds iff the fold ran to completion without breaking.
+ self.try_fold((), check(f)) == ControlFlow::CONTINUE
+ }
+
+ /// Tests if any element of the iterator matches a predicate.
+ ///
+ /// `any()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if any of them return
+ /// `true`, then so does `any()`. If they all return `false`, it
+ /// returns `false`.
+ ///
+ /// `any()` is short-circuiting; in other words, it will stop processing
+ /// as soon as it finds a `true`, given that no matter what else happens,
+ /// the result will also be `true`.
+ ///
+ /// An empty iterator returns `false`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert!(a.iter().any(|&x| x > 0));
+ ///
+ /// assert!(!a.iter().any(|&x| x > 5));
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert!(iter.any(|&x| x != 2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&2));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn any<F>(&mut self, f: F) -> bool
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> bool,
+ {
+ // Closure "factory" avoids genericity in `Self` (less monomorphization).
+ #[inline]
+ fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
+ move |(), x| {
+ // Short-circuit (break) on the first `true`.
+ if f(x) { ControlFlow::BREAK } else { ControlFlow::CONTINUE }
+ }
+ }
+
+ // A break means some element matched the predicate.
+ self.try_fold((), check(f)) == ControlFlow::BREAK
+ }
+
+ /// Searches for an element of an iterator that satisfies a predicate.
+ ///
+ /// `find()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if any of them return
+ /// `true`, then `find()` returns [`Some(element)`]. If they all return
+ /// `false`, it returns [`None`].
+ ///
+ /// `find()` is short-circuiting; in other words, it will stop processing
+ /// as soon as the closure returns `true`.
+ ///
+ /// Because `find()` takes a reference, and many iterators iterate over
+ /// references, this leads to a possibly confusing situation where the
+ /// argument is a double reference. You can see this effect in the
+ /// examples below, with `&&x`.
+ ///
+ /// [`Some(element)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2));
+ ///
+ /// assert_eq!(a.iter().find(|&&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.find(|&&x| x == 2), Some(&2));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ /// ```
+ ///
+ /// Note that `iter.find(f)` is equivalent to `iter.filter(f).next()`.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
+ where
+ Self: Sized,
+ P: FnMut(&Self::Item) -> bool,
+ {
+ // Closure "factory" avoids genericity in `Self`.
+ #[inline]
+ fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
+ move |(), x| {
+ // Break carries the matching element out of the fold.
+ if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::CONTINUE }
+ }
+ }
+
+ // `break_value()` is `Some(element)` iff the predicate matched.
+ self.try_fold((), check(predicate)).break_value()
+ }
+
+ /// Applies a function to the elements of the iterator and returns
+ /// the first non-`None` result.
+ ///
+ /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = ["lol", "NaN", "2", "5"];
+ ///
+ /// let first_number = a.iter().find_map(|s| s.parse().ok());
+ ///
+ /// assert_eq!(first_number, Some(2));
+ /// ```
+ #[inline]
+ #[stable(feature = "iterator_find_map", since = "1.30.0")]
+ fn find_map<B, F>(&mut self, f: F) -> Option<B>
+ where
+ Self: Sized,
+ F: FnMut(Self::Item) -> Option<B>,
+ {
+ // Closure "factory" avoids genericity in `Self`.
+ #[inline]
+ fn check<T, B>(mut f: impl FnMut(T) -> Option<B>) -> impl FnMut((), T) -> ControlFlow<B> {
+ move |(), x| match f(x) {
+ // Short-circuit on the first `Some`, carrying the mapped value.
+ Some(x) => ControlFlow::Break(x),
+ None => ControlFlow::CONTINUE,
+ }
+ }
+
+ self.try_fold((), check(f)).break_value()
+ }
+
+ /// Applies a function to the elements of the iterator and returns
+ /// the first `true` result or the first error.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(try_find)]
+ ///
+ /// let a = ["1", "2", "lol", "NaN", "5"];
+ ///
+ /// let is_my_num = |s: &str, search: i32| -> Result<bool, std::num::ParseIntError> {
+ /// Ok(s.parse::<i32>()? == search)
+ /// };
+ ///
+ /// let result = a.iter().try_find(|&&s| is_my_num(s, 2));
+ /// assert_eq!(result, Ok(Some(&"2")));
+ ///
+ /// let result = a.iter().try_find(|&&s| is_my_num(s, 5));
+ /// assert!(result.is_err());
+ /// ```
+ #[inline]
+ #[unstable(feature = "try_find", reason = "new API", issue = "63178")]
+ fn try_find<F, R>(&mut self, f: F) -> Result<Option<Self::Item>, R::Error>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> R,
+ R: Try<Ok = bool>,
+ {
+ // Closure "factory" avoids genericity in `Self`.
+ #[inline]
+ fn check<F, T, R>(mut f: F) -> impl FnMut((), T) -> ControlFlow<Result<T, R::Error>>
+ where
+ F: FnMut(&T) -> R,
+ R: Try<Ok = bool>,
+ {
+ move |(), x| match f(&x).into_result() {
+ // `Ok(false)`: no match, keep scanning.
+ Ok(false) => ControlFlow::CONTINUE,
+ // `Ok(true)`: stop with the matching element.
+ Ok(true) => ControlFlow::Break(Ok(x)),
+ // Error: stop and surface the error.
+ Err(x) => ControlFlow::Break(Err(x)),
+ }
+ }
+
+ // `break_value()` yields `Option<Result<..>>`; `transpose` converts it
+ // to the `Result<Option<..>, _>` shape of the return type.
+ self.try_fold((), check(f)).break_value().transpose()
+ }
+
+ /// Searches for an element in an iterator, returning its index.
+ ///
+ /// `position()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, and if one of them
+ /// returns `true`, then `position()` returns [`Some(index)`]. If all of
+ /// them return `false`, it returns [`None`].
+ ///
+ /// `position()` is short-circuiting; in other words, it will stop
+ /// processing as soon as it finds a `true`.
+ ///
+ /// # Overflow Behavior
+ ///
+ /// The method does no guarding against overflows, so if there are more
+ /// than [`usize::MAX`] non-matching elements, it either produces the wrong
+ /// result or panics. If debug assertions are enabled, a panic is
+ /// guaranteed.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic if the iterator has more than `usize::MAX`
+ /// non-matching elements.
+ ///
+ /// [`Some(index)`]: Some
+ /// [`usize::MAX`]: crate::usize::MAX
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().position(|&x| x == 2), Some(1));
+ ///
+ /// assert_eq!(a.iter().position(|&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3, 4];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.position(|&x| x >= 2), Some(1));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&3));
+ ///
+ /// // The returned index depends on iterator state
+ /// assert_eq!(iter.position(|&x| x == 4), Some(0));
+ ///
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn position<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ Self: Sized,
+ P: FnMut(Self::Item) -> bool,
+ {
+ // Closure "factory" avoids genericity in `Self`.
+ #[inline]
+ fn check<T>(
+ mut predicate: impl FnMut(T) -> bool,
+ ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
+ // The addition might panic on overflow
+ move |i, x| {
+ if predicate(x) {
+ // Break with the current index on the first match.
+ ControlFlow::Break(i)
+ } else {
+ ControlFlow::Continue(Add::add(i, 1))
+ }
+ }
+ }
+
+ // The accumulator is the running index; breaking yields the position.
+ self.try_fold(0, check(predicate)).break_value()
+ }
+
+ /// Searches for an element in an iterator from the right, returning its
+ /// index.
+ ///
+ /// `rposition()` takes a closure that returns `true` or `false`. It applies
+ /// this closure to each element of the iterator, starting from the end,
+ /// and if one of them returns `true`, then `rposition()` returns
+ /// [`Some(index)`]. If all of them return `false`, it returns [`None`].
+ ///
+ /// `rposition()` is short-circuiting; in other words, it will stop
+ /// processing as soon as it finds a `true`.
+ ///
+ /// [`Some(index)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2));
+ ///
+ /// assert_eq!(a.iter().rposition(|&x| x == 5), None);
+ /// ```
+ ///
+ /// Stopping at the first `true`:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter();
+ ///
+ /// assert_eq!(iter.rposition(|&x| x == 2), Some(1));
+ ///
+ /// // we can still use `iter`, as there are more elements.
+ /// assert_eq!(iter.next(), Some(&1));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn rposition<P>(&mut self, predicate: P) -> Option<usize>
+ where
+ P: FnMut(Self::Item) -> bool,
+ Self: Sized + ExactSizeIterator + DoubleEndedIterator,
+ {
+ // No need for an overflow check here, because `ExactSizeIterator`
+ // implies that the number of elements fits into a `usize`.
+ // Closure "factory" avoids genericity in `Self`.
+ #[inline]
+ fn check<T>(
+ mut predicate: impl FnMut(T) -> bool,
+ ) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
+ move |i, x| {
+ // Walking backwards: decrement first so `i` is the index of `x`.
+ let i = i - 1;
+ if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i) }
+ }
+ }
+
+ // Start the accumulator at `len` and count down from the back.
+ let n = self.len();
+ self.try_rfold(n, check(predicate)).break_value()
+ }
+
+ /// Returns the maximum element of an iterator.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let b: Vec<u32> = Vec::new();
+ ///
+ /// assert_eq!(a.iter().max(), Some(&3));
+ /// assert_eq!(b.iter().max(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn max(self) -> Option<Self::Item>
+ where
+     Self: Sized,
+     Self::Item: Ord,
+ {
+     // Defer to `max_by` using the items' total order; an empty iterator
+     // yields `None`, and ties keep the later element (see `max_by` docs).
+     self.max_by(|x, y| x.cmp(y))
+ }
+
+ /// Returns the minimum element of an iterator.
+ ///
+ /// If several elements are equally minimum, the first element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let b: Vec<u32> = Vec::new();
+ ///
+ /// assert_eq!(a.iter().min(), Some(&1));
+ /// assert_eq!(b.iter().min(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn min(self) -> Option<Self::Item>
+ where
+     Self: Sized,
+     Self::Item: Ord,
+ {
+     // Defer to `min_by` using the items' total order; an empty iterator
+     // yields `None`, and ties keep the earlier element (see `min_by` docs).
+     self.min_by(|x, y| x.cmp(y))
+ }
+
+ /// Returns the element that gives the maximum value from the
+ /// specified function.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
+ fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> B,
+ {
+ // Pair each element with its key so the key is computed only once
+ // per element.
+ #[inline]
+ fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
+ move |x| (f(&x), x)
+ }
+
+ // Compare pairs by key alone, ignoring the element.
+ #[inline]
+ fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering {
+ x_p.cmp(y_p)
+ }
+
+ // Discard the key; `?` propagates `None` for an empty iterator.
+ let (_, x) = self.map(key(f)).max_by(compare)?;
+ Some(x)
+ }
+
+ /// Returns the element that gives the maximum value with respect to the
+ /// specified comparison function.
+ ///
+ /// If several elements are equally maximum, the last element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_max_by", since = "1.15.0")]
+ fn max_by<F>(self, compare: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Ordering,
+ {
+ // Closure "factory" avoids genericity in `Self`. `cmp::max_by` keeps
+ // the second argument on `Equal`, which is what makes ties resolve to
+ // the *last* maximal element, as documented above.
+ #[inline]
+ fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
+ move |x, y| cmp::max_by(x, y, &mut compare)
+ }
+
+ self.fold_first(fold(compare))
+ }
+
+ /// Returns the element that gives the minimum value from the
+ /// specified function.
+ ///
+ /// If several elements are equally minimum, the first element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_cmp_by_key", since = "1.6.0")]
+ fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item) -> B,
+ {
+ // Pair each element with its key so the key is computed only once
+ // per element.
+ #[inline]
+ fn key<T, B>(mut f: impl FnMut(&T) -> B) -> impl FnMut(T) -> (B, T) {
+ move |x| (f(&x), x)
+ }
+
+ // Compare pairs by key alone, ignoring the element.
+ #[inline]
+ fn compare<T, B: Ord>((x_p, _): &(B, T), (y_p, _): &(B, T)) -> Ordering {
+ x_p.cmp(y_p)
+ }
+
+ // Discard the key; `?` propagates `None` for an empty iterator.
+ let (_, x) = self.map(key(f)).min_by(compare)?;
+ Some(x)
+ }
+
+ /// Returns the element that gives the minimum value with respect to the
+ /// specified comparison function.
+ ///
+ /// If several elements are equally minimum, the first element is
+ /// returned. If the iterator is empty, [`None`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [-3_i32, 0, 1, 5, -10];
+ /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10);
+ /// ```
+ #[inline]
+ #[stable(feature = "iter_min_by", since = "1.15.0")]
+ fn min_by<F>(self, compare: F) -> Option<Self::Item>
+ where
+ Self: Sized,
+ F: FnMut(&Self::Item, &Self::Item) -> Ordering,
+ {
+ // Closure "factory" avoids genericity in `Self`. `cmp::min_by` keeps
+ // the first argument on `Equal`, which is what makes ties resolve to
+ // the *first* minimal element, as documented above.
+ #[inline]
+ fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
+ move |x, y| cmp::min_by(x, y, &mut compare)
+ }
+
+ self.fold_first(fold(compare))
+ }
+
+ /// Reverses an iterator's direction.
+ ///
+ /// Usually, iterators iterate from left to right. After using `rev()`,
+ /// an iterator will instead iterate from right to left.
+ ///
+ /// This is only possible if the iterator has an end, so `rev()` only
+ /// works on [`DoubleEndedIterator`]s.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let mut iter = a.iter().rev();
+ ///
+ /// assert_eq!(iter.next(), Some(&3));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), Some(&1));
+ ///
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn rev(self) -> Rev<Self>
+ where
+ Self: Sized + DoubleEndedIterator,
+ {
+ // Wrap in the `Rev` adapter; the `DoubleEndedIterator` bound is what
+ // lets the adapter draw items from the back end instead.
+ Rev::new(self)
+ }
+
+ /// Converts an iterator of pairs into a pair of containers.
+ ///
+ /// `unzip()` consumes an entire iterator of pairs, producing two
+ /// collections: one from the left elements of the pairs, and one
+ /// from the right elements.
+ ///
+ /// This function is, in some sense, the opposite of [`zip`].
+ ///
+ /// [`zip`]: Iterator::zip
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [(1, 2), (3, 4)];
+ ///
+ /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip();
+ ///
+ /// assert_eq!(left, [1, 3]);
+ /// assert_eq!(right, [2, 4]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB)
+ where
+ FromA: Default + Extend<A>,
+ FromB: Default + Extend<B>,
+ Self: Sized + Iterator<Item = (A, B)>,
+ {
+ // Closure "factory": routes each pair's halves into the two collections
+ // without being generic over `Self` (less monomorphization).
+ fn extend<'a, A, B>(
+ ts: &'a mut impl Extend<A>,
+ us: &'a mut impl Extend<B>,
+ ) -> impl FnMut((), (A, B)) + 'a {
+ move |(), (t, u)| {
+ ts.extend_one(t);
+ us.extend_one(u);
+ }
+ }
+
+ let mut ts: FromA = Default::default();
+ let mut us: FromB = Default::default();
+
+ // Pre-reserve from the lower size bound to avoid repeated regrowth.
+ let (lower_bound, _) = self.size_hint();
+ if lower_bound > 0 {
+ ts.extend_reserve(lower_bound);
+ us.extend_reserve(lower_bound);
+ }
+
+ // Drive iteration via `fold` with a unit accumulator.
+ self.fold((), extend(&mut ts, &mut us));
+
+ (ts, us)
+ }
+
+ /// Creates an iterator which copies all of its elements.
+ ///
+ /// This is useful when you have an iterator over `&T`, but you need an
+ /// iterator over `T`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let v_copied: Vec<_> = a.iter().copied().collect();
+ ///
+ /// // copied is the same as .map(|&x| x)
+ /// let v_map: Vec<_> = a.iter().map(|&x| x).collect();
+ ///
+ /// assert_eq!(v_copied, vec![1, 2, 3]);
+ /// assert_eq!(v_map, vec![1, 2, 3]);
+ /// ```
+ #[stable(feature = "iter_copied", since = "1.36.0")]
+ fn copied<'a, T: 'a>(self) -> Copied<Self>
+ where
+ Self: Sized + Iterator<Item = &'a T>,
+ T: Copy,
+ {
+ // Wrap in the `Copied` adapter, which turns each `&T` item into `T`
+ // by copying (hence the `T: Copy` bound).
+ Copied::new(self)
+ }
+
+ /// Creates an iterator which [`clone`]s all of its elements.
+ ///
+ /// This is useful when you have an iterator over `&T`, but you need an
+ /// iterator over `T`.
+ ///
+ /// [`clone`]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ ///
+ /// let v_cloned: Vec<_> = a.iter().cloned().collect();
+ ///
+ /// // cloned is the same as .map(|&x| x), for integers
+ /// let v_map: Vec<_> = a.iter().map(|&x| x).collect();
+ ///
+ /// assert_eq!(v_cloned, vec![1, 2, 3]);
+ /// assert_eq!(v_map, vec![1, 2, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn cloned<'a, T: 'a>(self) -> Cloned<Self>
+ where
+ Self: Sized + Iterator<Item = &'a T>,
+ T: Clone,
+ {
+ // Wrap in the `Cloned` adapter, which turns each `&T` item into `T`
+ // by cloning (hence the `T: Clone` bound).
+ Cloned::new(self)
+ }
+
+    /// Repeats an iterator endlessly.
+    ///
+    /// Rather than finishing at [`None`], the iterator restarts from the
+    /// beginning and keeps going, forever. This requires the iterator to be
+    /// `Clone`, since a pristine copy is needed for each restart.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let a = [1, 2, 3];
+    ///
+    /// let mut it = a.iter().cycle();
+    ///
+    /// assert_eq!(it.next(), Some(&1));
+    /// assert_eq!(it.next(), Some(&2));
+    /// assert_eq!(it.next(), Some(&3));
+    /// assert_eq!(it.next(), Some(&1));
+    /// assert_eq!(it.next(), Some(&2));
+    /// assert_eq!(it.next(), Some(&3));
+    /// assert_eq!(it.next(), Some(&1));
+    /// ```
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn cycle(self) -> Cycle<Self>
+    where
+        Self: Clone + Sized,
+    {
+        Cycle::new(self)
+    }
+
+    /// Sums the elements of an iterator.
+    ///
+    /// Adds every element together and returns the total. An empty iterator
+    /// yields the zero value of the type.
+    ///
+    /// # Panics
+    ///
+    /// When calling `sum()` and a primitive integer type is being returned, this
+    /// method will panic if the computation overflows and debug assertions are
+    /// enabled.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let a = [1, 2, 3];
+    /// let sum: i32 = a.iter().sum();
+    ///
+    /// assert_eq!(sum, 6);
+    /// ```
+    #[stable(feature = "iter_arith", since = "1.11.0")]
+    fn sum<S>(self) -> S
+    where
+        Self: Sized,
+        S: Sum<Self::Item>,
+    {
+        S::sum(self)
+    }
+
+    /// Iterates over the entire iterator, multiplying all the elements
+    ///
+    /// An empty iterator returns the one value of the type.
+    ///
+    /// # Panics
+    ///
+    /// When calling `product()` and a primitive integer type is being returned,
+    /// this method will panic if the computation overflows and debug assertions are
+    /// enabled.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// fn factorial(n: u32) -> u32 {
+    ///     (1..=n).product()
+    /// }
+    /// assert_eq!(factorial(0), 1);
+    /// assert_eq!(factorial(1), 1);
+    /// assert_eq!(factorial(5), 120);
+    /// ```
+    #[stable(feature = "iter_arith", since = "1.11.0")]
+    fn product<P>(self) -> P
+    where
+        Self: Sized,
+        P: Product<Self::Item>,
+    {
+        Product::product(self)
+    }
+
+    /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+    /// of another.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cmp::Ordering;
+    ///
+    /// assert_eq!([1].iter().cmp([1].iter()), Ordering::Equal);
+    /// assert_eq!([1].iter().cmp([1, 2].iter()), Ordering::Less);
+    /// assert_eq!([1, 2].iter().cmp([1].iter()), Ordering::Greater);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn cmp<I>(self, other: I) -> Ordering
+    where
+        I: IntoIterator<Item = Self::Item>,
+        Self::Item: Ord,
+        Self: Sized,
+    {
+        // Delegate to `cmp_by` with the natural `Ord`-based comparator.
+        self.cmp_by(other, |x, y| Ord::cmp(&x, &y))
+    }
+
+    /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+    /// of another with respect to the specified comparison function.
+    ///
+    /// The first non-equal pair decides the result; if one iterator runs out
+    /// first, the shorter one compares as less.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(iter_order_by)]
+    ///
+    /// use std::cmp::Ordering;
+    ///
+    /// let xs = [1, 2, 3, 4];
+    /// let ys = [1, 4, 9, 16];
+    ///
+    /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| x.cmp(&y)), Ordering::Less);
+    /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (x * x).cmp(&y)), Ordering::Equal);
+    /// assert_eq!(xs.iter().cmp_by(&ys, |&x, &y| (2 * x).cmp(&y)), Ordering::Greater);
+    /// ```
+    #[unstable(feature = "iter_order_by", issue = "64295")]
+    fn cmp_by<I, F>(mut self, other: I, mut cmp: F) -> Ordering
+    where
+        Self: Sized,
+        I: IntoIterator,
+        F: FnMut(Self::Item, I::Item) -> Ordering,
+    {
+        let mut other = other.into_iter();
+
+        loop {
+            // `self` is advanced first, then `other`, matching lexicographic
+            // comparison semantics.
+            match (self.next(), other.next()) {
+                (None, None) => return Ordering::Equal,
+                (None, Some(_)) => return Ordering::Less,
+                (Some(_), None) => return Ordering::Greater,
+                (Some(x), Some(y)) => match cmp(x, y) {
+                    Ordering::Equal => continue,
+                    non_eq => return non_eq,
+                },
+            }
+        }
+    }
+
+    /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+    /// of another.
+    ///
+    /// Returns `None` as soon as a pair of elements is incomparable.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::cmp::Ordering;
+    ///
+    /// assert_eq!([1.].iter().partial_cmp([1.].iter()), Some(Ordering::Equal));
+    /// assert_eq!([1.].iter().partial_cmp([1., 2.].iter()), Some(Ordering::Less));
+    /// assert_eq!([1., 2.].iter().partial_cmp([1.].iter()), Some(Ordering::Greater));
+    ///
+    /// assert_eq!([f64::NAN].iter().partial_cmp([1.].iter()), None);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn partial_cmp<I>(self, other: I) -> Option<Ordering>
+    where
+        I: IntoIterator,
+        Self::Item: PartialOrd<I::Item>,
+        Self: Sized,
+    {
+        // Delegate to `partial_cmp_by` with the natural comparator.
+        self.partial_cmp_by(other, |x, y| PartialOrd::partial_cmp(&x, &y))
+    }
+
+    /// [Lexicographically](Ord#lexicographical-comparison) compares the elements of this [`Iterator`] with those
+    /// of another with respect to the specified comparison function.
+    ///
+    /// The first non-equal (or incomparable) pair decides the result; if one
+    /// iterator runs out first, the shorter one compares as less.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(iter_order_by)]
+    ///
+    /// use std::cmp::Ordering;
+    ///
+    /// let xs = [1.0, 2.0, 3.0, 4.0];
+    /// let ys = [1.0, 4.0, 9.0, 16.0];
+    ///
+    /// assert_eq!(
+    ///     xs.iter().partial_cmp_by(&ys, |&x, &y| x.partial_cmp(&y)),
+    ///     Some(Ordering::Less)
+    /// );
+    /// assert_eq!(
+    ///     xs.iter().partial_cmp_by(&ys, |&x, &y| (x * x).partial_cmp(&y)),
+    ///     Some(Ordering::Equal)
+    /// );
+    /// assert_eq!(
+    ///     xs.iter().partial_cmp_by(&ys, |&x, &y| (2.0 * x).partial_cmp(&y)),
+    ///     Some(Ordering::Greater)
+    /// );
+    /// ```
+    #[unstable(feature = "iter_order_by", issue = "64295")]
+    fn partial_cmp_by<I, F>(mut self, other: I, mut partial_cmp: F) -> Option<Ordering>
+    where
+        Self: Sized,
+        I: IntoIterator,
+        F: FnMut(Self::Item, I::Item) -> Option<Ordering>,
+    {
+        let mut other = other.into_iter();
+
+        loop {
+            // `self` is advanced first, then `other`, matching lexicographic
+            // comparison semantics.
+            match (self.next(), other.next()) {
+                (None, None) => return Some(Ordering::Equal),
+                (None, Some(_)) => return Some(Ordering::Less),
+                (Some(_), None) => return Some(Ordering::Greater),
+                (Some(x), Some(y)) => match partial_cmp(x, y) {
+                    Some(Ordering::Equal) => continue,
+                    non_eq => return non_eq,
+                },
+            }
+        }
+    }
+
+    /// Determines if the elements of this [`Iterator`] are equal to those of
+    /// another.
+    ///
+    /// Both the elements and the lengths must match.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().eq([1].iter()), true);
+    /// assert_eq!([1].iter().eq([1, 2].iter()), false);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn eq<I>(self, other: I) -> bool
+    where
+        I: IntoIterator,
+        Self::Item: PartialEq<I::Item>,
+        Self: Sized,
+    {
+        // Delegate to `eq_by` with the natural `PartialEq`-based predicate.
+        self.eq_by(other, |a, b| a == b)
+    }
+
+    /// Determines if the elements of this [`Iterator`] are equal to those of
+    /// another with respect to the specified equality function.
+    ///
+    /// Returns `false` as soon as a pair fails the predicate or the lengths
+    /// differ.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(iter_order_by)]
+    ///
+    /// let xs = [1, 2, 3, 4];
+    /// let ys = [1, 4, 9, 16];
+    ///
+    /// assert!(xs.iter().eq_by(&ys, |&x, &y| x * x == y));
+    /// ```
+    #[unstable(feature = "iter_order_by", issue = "64295")]
+    fn eq_by<I, F>(mut self, other: I, mut eq: F) -> bool
+    where
+        Self: Sized,
+        I: IntoIterator,
+        F: FnMut(Self::Item, I::Item) -> bool,
+    {
+        let mut other = other.into_iter();
+
+        loop {
+            // Advance both sides in lock-step; `self` first, then `other`.
+            match (self.next(), other.next()) {
+                (None, None) => return true,
+                (Some(x), Some(y)) => {
+                    if !eq(x, y) {
+                        return false;
+                    }
+                }
+                // One side exhausted before the other: lengths differ.
+                _ => return false,
+            }
+        }
+    }
+
+    /// Determines if the elements of this [`Iterator`] are unequal to those of
+    /// another.
+    ///
+    /// This is simply the negation of [`eq`](Iterator::eq).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().ne([1].iter()), false);
+    /// assert_eq!([1].iter().ne([1, 2].iter()), true);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn ne<I>(self, other: I) -> bool
+    where
+        Self: Sized,
+        I: IntoIterator,
+        Self::Item: PartialEq<I::Item>,
+    {
+        !self.eq(other)
+    }
+
+    /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+    /// less than those of another.
+    ///
+    /// Returns `false` when any pair of elements is incomparable
+    /// (`partial_cmp` returns `None`).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().lt([1].iter()), false);
+    /// assert_eq!([1].iter().lt([1, 2].iter()), true);
+    /// assert_eq!([1, 2].iter().lt([1].iter()), false);
+    /// assert_eq!([1, 2].iter().lt([1, 2].iter()), false);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn lt<I>(self, other: I) -> bool
+    where
+        I: IntoIterator,
+        Self::Item: PartialOrd<I::Item>,
+        Self: Sized,
+    {
+        // `matches!` for consistency with `le`/`ge`; behaves identically to
+        // `self.partial_cmp(other) == Some(Ordering::Less)`.
+        matches!(self.partial_cmp(other), Some(Ordering::Less))
+    }
+
+    /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+    /// less or equal to those of another.
+    ///
+    /// Returns `false` when any pair of elements is incomparable
+    /// (`partial_cmp` returns `None`).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().le([1].iter()), true);
+    /// assert_eq!([1].iter().le([1, 2].iter()), true);
+    /// assert_eq!([1, 2].iter().le([1].iter()), false);
+    /// assert_eq!([1, 2].iter().le([1, 2].iter()), true);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn le<I>(self, other: I) -> bool
+    where
+        I: IntoIterator,
+        Self::Item: PartialOrd<I::Item>,
+        Self: Sized,
+    {
+        matches!(self.partial_cmp(other), Some(Ordering::Less | Ordering::Equal))
+    }
+
+    /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+    /// greater than those of another.
+    ///
+    /// Returns `false` when any pair of elements is incomparable
+    /// (`partial_cmp` returns `None`).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().gt([1].iter()), false);
+    /// assert_eq!([1].iter().gt([1, 2].iter()), false);
+    /// assert_eq!([1, 2].iter().gt([1].iter()), true);
+    /// assert_eq!([1, 2].iter().gt([1, 2].iter()), false);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn gt<I>(self, other: I) -> bool
+    where
+        I: IntoIterator,
+        Self::Item: PartialOrd<I::Item>,
+        Self: Sized,
+    {
+        // `matches!` for consistency with `le`/`ge`; behaves identically to
+        // `self.partial_cmp(other) == Some(Ordering::Greater)`.
+        matches!(self.partial_cmp(other), Some(Ordering::Greater))
+    }
+
+    /// Determines if the elements of this [`Iterator`] are [lexicographically](Ord#lexicographical-comparison)
+    /// greater than or equal to those of another.
+    ///
+    /// Returns `false` when any pair of elements is incomparable
+    /// (`partial_cmp` returns `None`).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!([1].iter().ge([1].iter()), true);
+    /// assert_eq!([1].iter().ge([1, 2].iter()), false);
+    /// assert_eq!([1, 2].iter().ge([1].iter()), true);
+    /// assert_eq!([1, 2].iter().ge([1, 2].iter()), true);
+    /// ```
+    #[stable(feature = "iter_order", since = "1.5.0")]
+    fn ge<I>(self, other: I) -> bool
+    where
+        I: IntoIterator,
+        Self::Item: PartialOrd<I::Item>,
+        Self: Sized,
+    {
+        matches!(self.partial_cmp(other), Some(Ordering::Greater | Ordering::Equal))
+    }
+
+    /// Checks if the elements of this iterator are sorted.
+    ///
+    /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
+    /// iterator yields exactly zero or one element, `true` is returned.
+    ///
+    /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition
+    /// implies that this function returns `false` if any two consecutive items are not
+    /// comparable.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(is_sorted)]
+    ///
+    /// assert!([1, 2, 2, 9].iter().is_sorted());
+    /// assert!(![1, 3, 2, 4].iter().is_sorted());
+    /// assert!([0].iter().is_sorted());
+    /// assert!(std::iter::empty::<i32>().is_sorted());
+    /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
+    /// ```
+    #[inline]
+    #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+    fn is_sorted(self) -> bool
+    where
+        Self: Sized,
+        Self::Item: PartialOrd,
+    {
+        // Delegate to `is_sorted_by` with the natural partial order.
+        self.is_sorted_by(|a, b| a.partial_cmp(b))
+    }
+
+    /// Checks if the elements of this iterator are sorted using the given comparator function.
+    ///
+    /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare`
+    /// function to determine the ordering of two elements. Apart from that, it's equivalent to
+    /// [`is_sorted`]; see its documentation for more information.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(is_sorted)]
+    ///
+    /// assert!([1, 2, 2, 9].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// assert!(![1, 3, 2, 4].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// assert!([0].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// assert!(std::iter::empty::<i32>().is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// assert!(![0.0, 1.0, f32::NAN].iter().is_sorted_by(|a, b| a.partial_cmp(b)));
+    /// ```
+    ///
+    /// [`is_sorted`]: Iterator::is_sorted
+    #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+    fn is_sorted_by<F>(mut self, mut compare: F) -> bool
+    where
+        Self: Sized,
+        F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
+    {
+        // A zero- or one-element iterator is trivially sorted.
+        let mut prev = match self.next() {
+            None => return true,
+            Some(first) => first,
+        };
+
+        while let Some(next) = self.next() {
+            // Anything other than `Less`/`Equal` — i.e. `Greater` or an
+            // incomparable pair (`None`) — means the sequence is not sorted.
+            if !matches!(compare(&prev, &next), Some(Ordering::Less | Ordering::Equal)) {
+                return false;
+            }
+            prev = next;
+        }
+
+        true
+    }
+
+    /// Checks if the elements of this iterator are sorted using the given key extraction
+    /// function.
+    ///
+    /// Instead of comparing the iterator's elements directly, this function compares the keys
+    /// produced by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its
+    /// documentation for more information.
+    ///
+    /// [`is_sorted`]: Iterator::is_sorted
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(is_sorted)]
+    ///
+    /// assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
+    /// assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
+    /// ```
+    #[inline]
+    #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+    fn is_sorted_by_key<F, K>(self, f: F) -> bool
+    where
+        Self: Sized,
+        F: FnMut(Self::Item) -> K,
+        K: PartialOrd,
+    {
+        // Map every element to its key, then check the keys are sorted.
+        self.map(f).is_sorted()
+    }
+
+    /// See [TrustedRandomAccess]
+    ///
+    /// # Safety
+    ///
+    /// NOTE(review): callers must uphold the `TrustedRandomAccess` contract
+    /// for `_idx` — confirm against that trait's documentation. The default
+    /// body is never executed: implementations specialize it, as the
+    /// `unreachable!` message below states.
+    // The unusual name is to avoid name collisions in method resolution
+    // see #76479.
+    #[inline]
+    #[doc(hidden)]
+    #[unstable(feature = "trusted_random_access", issue = "none")]
+    unsafe fn __iterator_get_unchecked(&mut self, _idx: usize) -> Self::Item
+    where
+        Self: TrustedRandomAccess,
+    {
+        unreachable!("Always specialized");
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+// A mutable reference to an iterator is itself an iterator: every method
+// delegates to the underlying iterator `**self`.
+impl<I: Iterator + ?Sized> Iterator for &mut I {
+    type Item = I::Item;
+    fn next(&mut self) -> Option<I::Item> {
+        (**self).next()
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (**self).size_hint()
+    }
+    // Forwarding `advance_by` and `nth` preserves any specialized
+    // implementations `I` provides, rather than falling back to the
+    // trait's default bodies.
+    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+        (**self).advance_by(n)
+    }
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        (**self).nth(n)
+    }
+}
--- /dev/null
+/// An iterator that always continues to yield `None` when exhausted.
+///
+/// Calling next on a fused iterator that has returned `None` once is guaranteed
+/// to return [`None`] again. This trait should be implemented by all iterators
+/// that behave this way because it allows optimizing [`Iterator::fuse()`].
+///
+/// Note: In general, you should not use `FusedIterator` in generic bounds if
+/// you need a fused iterator. Instead, you should just call [`Iterator::fuse()`]
+/// on the iterator. If the iterator is already fused, the additional [`Fuse`]
+/// wrapper will be a no-op with no performance penalty.
+///
+/// This is a marker trait: it declares no methods of its own.
+///
+/// [`Fuse`]: crate::iter::Fuse
+#[stable(feature = "fused", since = "1.26.0")]
+#[rustc_unsafe_specialization_marker]
+pub trait FusedIterator: Iterator {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+// A `&mut` reference to a fused iterator is itself fused, since it
+// delegates to the underlying iterator.
+impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {}
+
+/// An iterator that reports an accurate length using size_hint.
+///
+/// The iterator reports a size hint where it is either exact
+/// (lower bound is equal to upper bound), or the upper bound is [`None`].
+/// The upper bound must only be [`None`] if the actual iterator length is
+/// larger than [`usize::MAX`]. In that case, the lower bound must be
+/// [`usize::MAX`], resulting in a [`Iterator::size_hint()`] of
+/// `(usize::MAX, None)`.
+///
+/// The iterator must produce exactly the number of elements it reported
+/// or diverge before reaching the end.
+///
+/// # Safety
+///
+/// This trait must only be implemented when the contract is upheld. Consumers
+/// of this trait must inspect [`Iterator::size_hint()`]’s upper bound.
+///
+/// [`usize::MAX`]: crate::usize::MAX
+#[unstable(feature = "trusted_len", issue = "37572")]
+// Marker trait: no methods; `unsafe` because unsafe code may rely on the
+// length contract documented above.
+#[rustc_unsafe_specialization_marker]
+pub unsafe trait TrustedLen: Iterator {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+// SAFETY: `&mut I` forwards `size_hint` and `next` to `I` (see the blanket
+// `Iterator for &mut I` impl), so it inherits the length contract.
+unsafe impl<I: TrustedLen + ?Sized> TrustedLen for &mut I {}
+
+/// An iterator that when yielding an item will have taken at least one element
+/// from its underlying [`SourceIter`].
+///
+/// Calling [`next()`] guarantees that at least one value of the iterator's underlying source
+/// has been moved out and the result of the iterator chain could be inserted in its place,
+/// assuming structural constraints of the source allow such an insertion.
+/// In other words this trait indicates that an iterator pipeline can be collected in place.
+///
+/// # Safety
+///
+/// NOTE(review): implementors must guarantee the take-at-least-one property
+/// described above — confirm the exact obligations against the in-place
+/// collection machinery before implementing.
+///
+/// [`SourceIter`]: crate::iter::SourceIter
+/// [`next()`]: Iterator::next
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub unsafe trait InPlaceIterable: Iterator {}
--- /dev/null
+mod accum;
+mod collect;
+mod double_ended;
+mod exact_size;
+mod iterator;
+mod marker;
+
+pub use self::accum::{Product, Sum};
+pub use self::collect::{Extend, FromIterator, IntoIterator};
+pub use self::double_ended::DoubleEndedIterator;
+pub use self::exact_size::ExactSizeIterator;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iterator::Iterator;
+#[unstable(issue = "none", feature = "inplace_iteration")]
+pub use self::marker::InPlaceIterable;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::marker::{FusedIterator, TrustedLen};
--- /dev/null
+//! Lazy values and one-time initialization of static data.
+
+use crate::cell::{Cell, UnsafeCell};
+use crate::fmt;
+use crate::mem;
+use crate::ops::Deref;
+
+/// A cell which can be written to only once.
+///
+/// Unlike `RefCell`, a `OnceCell` only provides shared `&T` references to its value.
+/// Unlike `Cell`, a `OnceCell` doesn't require copying or replacing the value to access it.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::lazy::OnceCell;
+///
+/// let cell = OnceCell::new();
+/// assert!(cell.get().is_none());
+///
+/// let value: &String = cell.get_or_init(|| {
+///     "Hello, World!".to_string()
+/// });
+/// assert_eq!(value, "Hello, World!");
+/// assert!(cell.get().is_some());
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct OnceCell<T> {
+    // Invariant: written to at most once.
+    //
+    // `UnsafeCell` supplies the interior mutability; the write-once
+    // discipline (enforced by `set` checking for an existing value) is what
+    // makes the shared references handed out by `get` sound.
+    inner: UnsafeCell<Option<T>>,
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T> Default for OnceCell<T> {
+    /// Creates an empty cell, equivalent to [`OnceCell::new`].
+    fn default() -> Self {
+        OnceCell::new()
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Render the contained value when initialized, a fixed placeholder
+        // otherwise.
+        if let Some(value) = self.get() {
+            f.debug_tuple("OnceCell").field(value).finish()
+        } else {
+            f.write_str("OnceCell(Uninit)")
+        }
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Clone> Clone for OnceCell<T> {
+    /// Clones the cell: the result is initialized iff `self` is, holding a
+    /// clone of `self`'s value.
+    fn clone(&self) -> OnceCell<T> {
+        let cell = OnceCell::new();
+        if let Some(value) = self.get() {
+            // `cell` was just created and is still empty, so `set` cannot
+            // fail here.
+            if cell.set(value.clone()).is_err() {
+                unreachable!();
+            }
+        }
+        cell
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: PartialEq> PartialEq for OnceCell<T> {
+    // Two cells are equal when both are empty, or both are initialized with
+    // equal values (compared via `Option<&T>`).
+    fn eq(&self, other: &Self) -> bool {
+        self.get() == other.get()
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Eq> Eq for OnceCell<T> {}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T> From<T> for OnceCell<T> {
+    /// Creates a cell that is already initialized with `value`.
+    fn from(value: T) -> Self {
+        let inner = UnsafeCell::new(Some(value));
+        OnceCell { inner }
+    }
+}
+
+impl<T> OnceCell<T> {
+    /// Creates a new empty cell.
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub const fn new() -> OnceCell<T> {
+        OnceCell { inner: UnsafeCell::new(None) }
+    }
+
+    /// Gets the reference to the underlying value.
+    ///
+    /// Returns `None` if the cell is empty.
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn get(&self) -> Option<&T> {
+        // SAFETY: Safe due to `inner`'s invariant
+        unsafe { &*self.inner.get() }.as_ref()
+    }
+
+    /// Gets the mutable reference to the underlying value.
+    ///
+    /// Returns `None` if the cell is empty.
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn get_mut(&mut self) -> Option<&mut T> {
+        // SAFETY: Safe because we have unique access
+        unsafe { &mut *self.inner.get() }.as_mut()
+    }
+
+    /// Sets the contents of the cell to `value`.
+    ///
+    /// # Errors
+    ///
+    /// This method returns `Ok(())` if the cell was empty and `Err(value)` if
+    /// it was full.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::OnceCell;
+    ///
+    /// let cell = OnceCell::new();
+    /// assert!(cell.get().is_none());
+    ///
+    /// assert_eq!(cell.set(92), Ok(()));
+    /// assert_eq!(cell.set(62), Err(62));
+    ///
+    /// assert!(cell.get().is_some());
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn set(&self, value: T) -> Result<(), T> {
+        // First, a shared peek to reject a second write without touching the
+        // contents.
+        // SAFETY: Safe because we cannot have overlapping mutable borrows
+        let slot = unsafe { &*self.inner.get() };
+        if slot.is_some() {
+            return Err(value);
+        }
+
+        // SAFETY: This is the only place where we set the slot, no races
+        // due to reentrancy/concurrency are possible, and we've
+        // checked that slot is currently `None`, so this write
+        // maintains the `inner`'s invariant.
+        let slot = unsafe { &mut *self.inner.get() };
+        *slot = Some(value);
+        Ok(())
+    }
+
+    /// Gets the contents of the cell, initializing it with `f`
+    /// if the cell was empty.
+    ///
+    /// # Panics
+    ///
+    /// If `f` panics, the panic is propagated to the caller, and the cell
+    /// remains uninitialized.
+    ///
+    /// It is an error to reentrantly initialize the cell from `f`. Doing
+    /// so results in a panic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::OnceCell;
+    ///
+    /// let cell = OnceCell::new();
+    /// let value = cell.get_or_init(|| 92);
+    /// assert_eq!(value, &92);
+    /// let value = cell.get_or_init(|| unreachable!());
+    /// assert_eq!(value, &92);
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn get_or_init<F>(&self, f: F) -> &T
+    where
+        F: FnOnce() -> T,
+    {
+        // The never type `!` as the error makes the `Err` arm impossible, so
+        // (with `exhaustive_patterns`) the match needs no error branch.
+        match self.get_or_try_init(|| Ok::<T, !>(f())) {
+            Ok(val) => val,
+        }
+    }
+
+    /// Gets the contents of the cell, initializing it with `f` if
+    /// the cell was empty. If the cell was empty and `f` failed, an
+    /// error is returned.
+    ///
+    /// # Panics
+    ///
+    /// If `f` panics, the panic is propagated to the caller, and the cell
+    /// remains uninitialized.
+    ///
+    /// It is an error to reentrantly initialize the cell from `f`. Doing
+    /// so results in a panic.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::OnceCell;
+    ///
+    /// let cell = OnceCell::new();
+    /// assert_eq!(cell.get_or_try_init(|| Err(())), Err(()));
+    /// assert!(cell.get().is_none());
+    /// let value = cell.get_or_try_init(|| -> Result<i32, ()> {
+    ///     Ok(92)
+    /// });
+    /// assert_eq!(value, Ok(&92));
+    /// assert_eq!(cell.get(), Some(&92))
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
+    where
+        F: FnOnce() -> Result<T, E>,
+    {
+        if let Some(val) = self.get() {
+            return Ok(val);
+        }
+        // `f` runs while the cell is still empty; a panic here leaves the
+        // cell uninitialized, as documented.
+        let val = f()?;
+        // Note that *some* forms of reentrant initialization might lead to
+        // UB (see `reentrant_init` test). I believe that just removing this
+        // `assert`, while keeping `set/get` would be sound, but it seems
+        // better to panic, rather than to silently use an old value.
+        assert!(self.set(val).is_ok(), "reentrant init");
+        Ok(self.get().unwrap())
+    }
+
+    /// Consumes the cell, returning the wrapped value.
+    ///
+    /// Returns `None` if the cell was empty.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::OnceCell;
+    ///
+    /// let cell: OnceCell<String> = OnceCell::new();
+    /// assert_eq!(cell.into_inner(), None);
+    ///
+    /// let cell = OnceCell::new();
+    /// cell.set("hello".to_string()).unwrap();
+    /// assert_eq!(cell.into_inner(), Some("hello".to_string()));
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn into_inner(self) -> Option<T> {
+        // Because `into_inner` takes `self` by value, the compiler statically verifies
+        // that it is not currently borrowed. So it is safe to move out `Option<T>`.
+        self.inner.into_inner()
+    }
+
+    /// Takes the value out of this `OnceCell`, moving it back to an uninitialized state.
+    ///
+    /// Has no effect and returns `None` if the `OnceCell` hasn't been initialized.
+    ///
+    /// Safety is guaranteed by requiring a mutable reference.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::OnceCell;
+    ///
+    /// let mut cell: OnceCell<String> = OnceCell::new();
+    /// assert_eq!(cell.take(), None);
+    ///
+    /// let mut cell = OnceCell::new();
+    /// cell.set("hello".to_string()).unwrap();
+    /// assert_eq!(cell.take(), Some("hello".to_string()));
+    /// assert_eq!(cell.get(), None);
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn take(&mut self) -> Option<T> {
+        // Swap `self` with a fresh empty cell (via `Default`) and unwrap the
+        // old one; the unique `&mut` access makes this race-free.
+        mem::take(self).into_inner()
+    }
+}
+
+/// A value which is initialized on the first access.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(once_cell)]
+///
+/// use std::lazy::Lazy;
+///
+/// let lazy: Lazy<i32> = Lazy::new(|| {
+///     println!("initializing");
+///     92
+/// });
+/// println!("ready");
+/// println!("{}", *lazy);
+/// println!("{}", *lazy);
+///
+/// // Prints:
+/// //   ready
+/// //   initializing
+/// //   92
+/// //   92
+/// ```
+#[unstable(feature = "once_cell", issue = "74465")]
+pub struct Lazy<T, F = fn() -> T> {
+    // Holds the computed value once `force` has run.
+    cell: OnceCell<T>,
+    // Holds the initializer until first use; `force` `take`s it out, leaving
+    // `None` behind. `None` with an empty `cell` marks a poisoned `Lazy`
+    // (its initializer panicked on a previous access).
+    init: Cell<Option<F>>,
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: fmt::Debug, F> fmt::Debug for Lazy<T, F> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // The initializer `F` is not required to be `Debug`, so a fixed
+        // placeholder is printed for it.
+        let mut builder = f.debug_struct("Lazy");
+        builder.field("cell", &self.cell);
+        builder.field("init", &"..");
+        builder.finish()
+    }
+}
+
+impl<T, F> Lazy<T, F> {
+    /// Constructs a lazy value from its initializing function.
+    ///
+    /// `init` is not invoked here; it runs the first time the value is
+    /// forced or dereferenced.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// # fn main() {
+    /// use std::lazy::Lazy;
+    ///
+    /// let hello = "Hello, World!".to_string();
+    ///
+    /// let lazy = Lazy::new(|| hello.to_uppercase());
+    ///
+    /// assert_eq!(&*lazy, "HELLO, WORLD!");
+    /// # }
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub const fn new(init: F) -> Lazy<T, F> {
+        Lazy { init: Cell::new(Some(init)), cell: OnceCell::new() }
+    }
+}
+
+impl<T, F: FnOnce() -> T> Lazy<T, F> {
+    /// Forces evaluation of this lazy value and returns a reference to
+    /// the result.
+    ///
+    /// This is equivalent to the `Deref` impl, but is explicit.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(once_cell)]
+    ///
+    /// use std::lazy::Lazy;
+    ///
+    /// let lazy = Lazy::new(|| 92);
+    ///
+    /// assert_eq!(Lazy::force(&lazy), &92);
+    /// assert_eq!(&*lazy, &92);
+    /// ```
+    #[unstable(feature = "once_cell", issue = "74465")]
+    pub fn force(this: &Lazy<T, F>) -> &T {
+        this.cell.get_or_init(|| {
+            // `take` consumes the initializer. Finding `None` here means a
+            // previous forcing attempt already took it without filling the
+            // cell (e.g. the initializer panicked), so the `Lazy` is poisoned.
+            match this.init.take() {
+                Some(init) => init(),
+                None => panic!("`Lazy` instance has previously been poisoned"),
+            }
+        })
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T, F: FnOnce() -> T> Deref for Lazy<T, F> {
+    type Target = T;
+    // Dereferencing forces the lazy value: the initializer runs on the
+    // first access, later accesses return the cached value.
+    fn deref(&self) -> &T {
+        Lazy::force(self)
+    }
+}
+
+#[unstable(feature = "once_cell", issue = "74465")]
+impl<T: Default> Default for Lazy<T> {
+    /// Creates a new lazy value using `Default` as the initializing function.
+    // `T::default` is a plain `fn() -> T`, matching the default `F` type
+    // parameter of `Lazy`.
+    fn default() -> Lazy<T> {
+        Lazy::new(T::default)
+    }
+}
--- /dev/null
+//! # The Rust Core Library
+//!
+//! The Rust Core Library is the dependency-free[^free] foundation of [The
+//! Rust Standard Library](../std/index.html). It is the portable glue
+//! between the language and its libraries, defining the intrinsic and
+//! primitive building blocks of all Rust code. It links to no
+//! upstream libraries, no system libraries, and no libc.
+//!
+//! [^free]: Strictly speaking, there are some symbols which are needed but
+//! they aren't always necessary.
+//!
+//! The core library is *minimal*: it isn't even aware of heap allocation,
+//! nor does it provide concurrency or I/O. These things require
+//! platform integration, and this library is platform-agnostic.
+//!
+//! # How to use the core library
+//!
+//! Please note that all of these details are currently not considered stable.
+//!
+// FIXME: Fill me in with more detail when the interface settles
+//! This library is built on the assumption of a few existing symbols:
+//!
+//! * `memcpy`, `memcmp`, `memset` - These are core memory routines which are
+//! often generated by LLVM. Additionally, this library can make explicit
+//! calls to these functions. Their signatures are the same as found in C.
+//! These functions are often provided by the system libc, but can also be
+//! provided by the [compiler-builtins crate](https://crates.io/crates/compiler_builtins).
+//!
+//! * `rust_begin_panic` - This function takes four arguments, a
+//! `fmt::Arguments`, a `&'static str`, and two `u32`'s. These four arguments
+//! dictate the panic message, the file at which panic was invoked, and the
+//! line and column inside the file. It is up to consumers of this core
+//! library to define this panic function; it is only required to never
+//! return. This requires a `lang` attribute named `panic_impl`.
+//!
+//! * `rust_eh_personality` - is used by the failure mechanisms of the
+//! compiler. This is often mapped to GCC's personality function, but crates
+//! which do not trigger a panic can be assured that this function is never
+//! called. The `lang` attribute is called `eh_personality`.
+
+// Since libcore defines many fundamental lang items, all tests live in a
+// separate crate, libcoretest, to avoid bizarre issues.
+//
+// Here we explicitly #[cfg]-out this whole crate when testing. If we don't do
+// this, both the generated test artifact and the linked libtest (which
+// transitively includes libcore) will both define the same set of lang items,
+// and this will cause the E0152 "found duplicate lang item" error. See
+// discussion in #50466 for details.
+//
+// This cfg won't affect doc tests.
+#![cfg(not(test))]
+#![stable(feature = "core", since = "1.6.0")]
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/",
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(deny(warnings))),
+ test(attr(allow(dead_code, deprecated, unused_variables, unused_mut)))
+)]
+#![no_core]
+#![warn(deprecated_in_future)]
+#![warn(missing_docs)]
+#![warn(missing_debug_implementations)]
+#![allow(explicit_outlives_requirements)]
+#![allow(incomplete_features)]
+#![cfg_attr(not(bootstrap), feature(rustc_allow_const_fn_unstable))]
+#![feature(allow_internal_unstable)]
+#![feature(arbitrary_self_types)]
+#![feature(asm)]
+#![feature(cfg_target_has_atomic)]
+#![feature(const_alloc_layout)]
+#![feature(const_discriminant)]
+#![feature(const_cell_into_inner)]
+#![feature(const_checked_int_methods)]
+#![feature(const_euclidean_int_methods)]
+#![feature(const_float_classify)]
+#![feature(const_float_bits_conv)]
+#![feature(const_overflowing_int_methods)]
+#![feature(const_int_unchecked_arith)]
+#![feature(const_mut_refs)]
+#![feature(const_int_pow)]
+#![feature(constctlz)]
+#![feature(const_panic)]
+#![feature(const_pin)]
+#![feature(const_fn)]
+#![feature(const_fn_union)]
+#![cfg_attr(not(bootstrap), feature(const_impl_trait))]
+#![feature(const_fn_floating_point_arithmetic)]
+#![feature(const_fn_fn_ptr_basics)]
+#![feature(const_generics)]
+#![feature(const_option)]
+#![feature(const_precise_live_drops)]
+#![feature(const_ptr_offset)]
+#![feature(const_ptr_offset_from)]
+#![feature(const_raw_ptr_comparison)]
+#![feature(const_slice_from_raw_parts)]
+#![feature(const_slice_ptr_len)]
+#![feature(const_size_of_val)]
+#![feature(const_align_of_val)]
+#![feature(const_type_id)]
+#![feature(const_type_name)]
+#![feature(const_likely)]
+#![feature(const_unreachable_unchecked)]
+#![feature(custom_inner_attributes)]
+#![feature(decl_macro)]
+#![feature(doc_cfg)]
+#![feature(doc_spotlight)]
+#![feature(duration_consts_2)]
+#![feature(duration_saturating_ops)]
+#![feature(extern_types)]
+#![feature(fundamental)]
+#![feature(intrinsics)]
+#![feature(lang_items)]
+#![feature(link_llvm_intrinsics)]
+#![feature(llvm_asm)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(nll)]
+#![feature(exhaustive_patterns)]
+#![feature(no_core)]
+#![feature(optin_builtin_traits)]
+#![feature(or_patterns)]
+#![feature(prelude_import)]
+#![feature(repr_simd, platform_intrinsics)]
+#![feature(rustc_attrs)]
+#![feature(simd_ffi)]
+#![feature(min_specialization)]
+#![feature(staged_api)]
+#![feature(std_internals)]
+#![feature(stmt_expr_attributes)]
+#![feature(str_split_as_str)]
+#![feature(str_split_inclusive_as_str)]
+#![feature(transparent_unions)]
+#![feature(try_blocks)]
+#![feature(unboxed_closures)]
+#![cfg_attr(not(bootstrap), feature(unsized_fn_params))]
+#![cfg_attr(bootstrap, feature(unsized_locals))]
+#![cfg_attr(bootstrap, feature(untagged_unions))]
+#![feature(unwind_attributes)]
+#![feature(variant_count)]
+#![feature(tbm_target_feature)]
+#![feature(sse4a_target_feature)]
+#![feature(arm_target_feature)]
+#![feature(powerpc_target_feature)]
+#![feature(mips_target_feature)]
+#![feature(aarch64_target_feature)]
+#![feature(wasm_target_feature)]
+#![feature(avx512_target_feature)]
+#![feature(cmpxchg16b_target_feature)]
+#![feature(rtm_target_feature)]
+#![feature(f16c_target_feature)]
+#![feature(hexagon_target_feature)]
+#![feature(const_fn_transmute)]
+#![feature(abi_unadjusted)]
+#![feature(adx_target_feature)]
+#![feature(external_doc)]
+#![feature(associated_type_bounds)]
+#![feature(const_caller_location)]
+#![feature(slice_ptr_get)]
+#![feature(no_niche)] // rust-lang/rust#68303
+#![feature(unsafe_block_in_unsafe_fn)]
+#![feature(int_error_matching)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+#[prelude_import]
+#[allow(unused)]
+use prelude::v1::*;
+
+#[cfg(not(test))] // See #65860
+#[macro_use]
+mod macros;
+
+#[macro_use]
+mod internal_macros;
+
+#[path = "num/shells/int_macros.rs"]
+#[macro_use]
+mod int_macros;
+
+#[path = "num/shells/i128.rs"]
+pub mod i128;
+#[path = "num/shells/i16.rs"]
+pub mod i16;
+#[path = "num/shells/i32.rs"]
+pub mod i32;
+#[path = "num/shells/i64.rs"]
+pub mod i64;
+#[path = "num/shells/i8.rs"]
+pub mod i8;
+#[path = "num/shells/isize.rs"]
+pub mod isize;
+
+#[path = "num/shells/u128.rs"]
+pub mod u128;
+#[path = "num/shells/u16.rs"]
+pub mod u16;
+#[path = "num/shells/u32.rs"]
+pub mod u32;
+#[path = "num/shells/u64.rs"]
+pub mod u64;
+#[path = "num/shells/u8.rs"]
+pub mod u8;
+#[path = "num/shells/usize.rs"]
+pub mod usize;
+
+#[path = "num/f32.rs"]
+pub mod f32;
+#[path = "num/f64.rs"]
+pub mod f64;
+
+#[macro_use]
+pub mod num;
+
+/* The libcore prelude, not as all-encompassing as the libstd prelude */
+
+pub mod prelude;
+
+/* Core modules for ownership management */
+
+pub mod hint;
+pub mod intrinsics;
+pub mod mem;
+pub mod ptr;
+
+/* Core language traits */
+
+pub mod borrow;
+pub mod clone;
+pub mod cmp;
+pub mod convert;
+pub mod default;
+pub mod marker;
+pub mod ops;
+
+/* Core types and methods on primitives */
+
+pub mod any;
+pub mod array;
+pub mod ascii;
+pub mod cell;
+pub mod char;
+pub mod ffi;
+pub mod iter;
+#[unstable(feature = "once_cell", issue = "74465")]
+pub mod lazy;
+pub mod option;
+pub mod panic;
+pub mod panicking;
+pub mod pin;
+pub mod raw;
+pub mod result;
+pub mod sync;
+
+pub mod fmt;
+pub mod hash;
+pub mod slice;
+pub mod str;
+pub mod time;
+
+pub mod unicode;
+
+/* Async */
+pub mod future;
+pub mod task;
+
+/* Heap memory allocator trait */
+#[allow(missing_docs)]
+pub mod alloc;
+
+// note: does not need to be public
+mod bool;
+mod tuple;
+mod unit;
+
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub mod primitive;
+
+// Pull in the `core_arch` crate directly into libcore. The contents of
+// `core_arch` are in a different repository: rust-lang/stdarch.
+//
+// `core_arch` depends on libcore, but the contents of this module are
+// set up in such a way that directly pulling it here works such that the
+// crate uses the this crate as its libcore.
+#[path = "../../stdarch/crates/core_arch/src/mod.rs"]
+#[allow(
+ missing_docs,
+ missing_debug_implementations,
+ dead_code,
+ unused_imports,
+ unsafe_op_in_unsafe_fn
+)]
+#[cfg_attr(not(bootstrap), allow(non_autolinks))]
+// FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
+// merged. It currently cannot because bootstrap fails as the lint hasn't been defined yet.
+#[allow(clashing_extern_declarations)]
+#[unstable(feature = "stdsimd", issue = "48556")]
+mod core_arch;
+
+#[stable(feature = "simd_arch", since = "1.27.0")]
+pub use core_arch::arch;
--- /dev/null
+// Core `panic!`: each arm dispatches to a dedicated entry point in
+// `crate::panicking`, so the message-less and plain-string forms never build
+// `format_args!` machinery.
+#[doc(include = "panic.md")]
+#[macro_export]
+#[allow_internal_unstable(core_panic, const_caller_location)]
+#[stable(feature = "core", since = "1.6.0")]
+macro_rules! panic {
+ // Bare `panic!()`: re-enter the macro with a fixed literal message.
+ () => (
+ $crate::panic!("explicit panic")
+ );
+ // String literal: routed to `panicking::panic`, skipping formatting.
+ ($msg:literal $(,)?) => (
+ $crate::panicking::panic($msg)
+ );
+ // Any other single expression is treated as a `&str` message.
+ ($msg:expr $(,)?) => (
+ $crate::panicking::panic_str($msg)
+ );
+ // Format string plus arguments: full formatting path.
+ ($fmt:expr, $($arg:tt)+) => (
+ $crate::panicking::panic_fmt($crate::format_args!($fmt, $($arg)+))
+ );
+}
+
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Like [`assert!`], this macro has a second form, where a custom
+/// panic message can be provided.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 1 + 2;
+/// assert_eq!(a, b);
+///
+/// assert_eq!(a, b, "we are testing addition with {} and {}", a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! assert_eq {
+ // Two-argument form: default panic message shows both values.
+ ($left:expr, $right:expr $(,)?) => ({
+ // The `match` binds each operand once, so each expression is
+ // evaluated exactly once regardless of the comparison outcome.
+ match (&$left, &$right) {
+ (left_val, right_val) => {
+ if !(*left_val == *right_val) {
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ panic!(r#"assertion failed: `(left == right)`
+ left: `{:?}`,
+ right: `{:?}`"#, &*left_val, &*right_val)
+ }
+ }
+ }
+ });
+ // Three-plus-argument form: custom format message appended after the values.
+ ($left:expr, $right:expr, $($arg:tt)+) => ({
+ match (&($left), &($right)) {
+ (left_val, right_val) => {
+ if !(*left_val == *right_val) {
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ panic!(r#"assertion failed: `(left == right)`
+ left: `{:?}`,
+ right: `{:?}`: {}"#, &*left_val, &*right_val,
+ $crate::format_args!($($arg)+))
+ }
+ }
+ }
+ });
+}
+
+/// Asserts that two expressions are not equal to each other (using [`PartialEq`]).
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Like [`assert!`], this macro has a second form, where a custom
+/// panic message can be provided.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 2;
+/// assert_ne!(a, b);
+///
+/// assert_ne!(a, b, "we are testing that the values are not equal");
+/// ```
+#[macro_export]
+#[stable(feature = "assert_ne", since = "1.13.0")]
+macro_rules! assert_ne {
+ // Two-argument form: default panic message shows both values.
+ ($left:expr, $right:expr $(,)?) => ({
+ // The `match` binds each operand once, so each expression is
+ // evaluated exactly once regardless of the comparison outcome.
+ match (&$left, &$right) {
+ (left_val, right_val) => {
+ if *left_val == *right_val {
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ panic!(r#"assertion failed: `(left != right)`
+ left: `{:?}`,
+ right: `{:?}`"#, &*left_val, &*right_val)
+ }
+ }
+ }
+ });
+ // Three-plus-argument form: custom format message appended after the values.
+ ($left:expr, $right:expr, $($arg:tt)+) => ({
+ match (&($left), &($right)) {
+ (left_val, right_val) => {
+ if *left_val == *right_val {
+ // The reborrows below are intentional. Without them, the stack slot for the
+ // borrow is initialized even before the values are compared, leading to a
+ // noticeable slow down.
+ panic!(r#"assertion failed: `(left != right)`
+ left: `{:?}`,
+ right: `{:?}`: {}"#, &*left_val, &*right_val,
+ $crate::format_args!($($arg)+))
+ }
+ }
+ }
+ });
+}
+
+/// Asserts that a boolean expression is `true` at runtime.
+///
+/// This will invoke the [`panic!`] macro if the provided expression cannot be
+/// evaluated to `true` at runtime.
+///
+/// Like [`assert!`], this macro also has a second version, where a custom panic
+/// message can be provided.
+///
+/// # Uses
+///
+/// Unlike [`assert!`], `debug_assert!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert!` is always type checked.
+///
+/// An unchecked assertion allows a program in an inconsistent state to keep
+/// running, which might have unexpected consequences but does not introduce
+/// unsafety as long as this only happens in safe code. The performance cost
+/// of assertions, however, is not measurable in general. Replacing [`assert!`]
+/// with `debug_assert!` is thus only encouraged after thorough profiling, and
+/// more importantly, only in safe code!
+///
+/// # Examples
+///
+/// ```
+/// // the panic message for these assertions is the stringified value of the
+/// // expression given.
+/// debug_assert!(true);
+///
+/// fn some_expensive_computation() -> bool { true } // a very simple function
+/// debug_assert!(some_expensive_computation());
+///
+/// // assert with a custom message
+/// let x = true;
+/// debug_assert!(x, "x wasn't true!");
+///
+/// let a = 3; let b = 27;
+/// debug_assert!(a + b == 30, "a = {}, b = {}", a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! debug_assert {
+ // The assertion is always part of the expansion (so it is always type
+ // checked); the `cfg!` condition only gates whether it runs.
+ ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert!($($arg)*); })
+}
+
+/// Asserts that two expressions are equal to each other.
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Unlike [`assert_eq!`], `debug_assert_eq!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert_eq!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert_eq!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert_eq!` is always type checked.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 1 + 2;
+/// debug_assert_eq!(a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! debug_assert_eq {
+ // Always expands (so it is always type checked); `cfg!` gates execution.
+ ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_eq!($($arg)*); })
+}
+
+/// Asserts that two expressions are not equal to each other.
+///
+/// On panic, this macro will print the values of the expressions with their
+/// debug representations.
+///
+/// Unlike [`assert_ne!`], `debug_assert_ne!` statements are only enabled in non
+/// optimized builds by default. An optimized build will not execute
+/// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the
+/// compiler. This makes `debug_assert_ne!` useful for checks that are too
+/// expensive to be present in a release build but may be helpful during
+/// development. The result of expanding `debug_assert_ne!` is always type checked.
+///
+/// # Examples
+///
+/// ```
+/// let a = 3;
+/// let b = 2;
+/// debug_assert_ne!(a, b);
+/// ```
+#[macro_export]
+#[stable(feature = "assert_ne", since = "1.13.0")]
+macro_rules! debug_assert_ne {
+ // Always expands (so it is always type checked); `cfg!` gates execution.
+ ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_ne!($($arg)*); })
+}
+
+/// Returns whether the given expression matches any of the given patterns.
+///
+/// Like in a `match` expression, the pattern can be optionally followed by `if`
+/// and a guard expression that has access to names bound by the pattern.
+///
+/// # Examples
+///
+/// ```
+/// let foo = 'f';
+/// assert!(matches!(foo, 'A'..='Z' | 'a'..='z'));
+///
+/// let bar = Some(4);
+/// assert!(matches!(bar, Some(x) if x > 2));
+/// ```
+#[macro_export]
+#[stable(feature = "matches_macro", since = "1.42.0")]
+macro_rules! matches {
+ // Expands to a `match` that maps the listed patterns (with an optional
+ // guard) to `true` and everything else to `false`.
+ ($expression:expr, $( $pattern:pat )|+ $( if $guard: expr )? $(,)?) => {
+ match $expression {
+ $( $pattern )|+ $( if $guard )? => true,
+ _ => false
+ }
+ }
+}
+
+/// Unwraps a result or propagates its error.
+///
+/// The `?` operator was added to replace `try!` and should be used instead.
+/// Furthermore, `try` is a reserved word in Rust 2018, so if you must use
+/// it, you will need to use the [raw-identifier syntax][ris]: `r#try`.
+///
+/// [ris]: https://doc.rust-lang.org/nightly/rust-by-example/compatibility/raw_identifiers.html
+///
+/// `try!` matches the given [`Result`]. In case of the `Ok` variant, the
+/// expression has the value of the wrapped value.
+///
+/// In case of the `Err` variant, it retrieves the inner error. `try!` then
+/// performs conversion using `From`. This provides automatic conversion
+/// between specialized errors and more general ones. The resulting
+/// error is then immediately returned.
+///
+/// Because of the early return, `try!` can only be used in functions that
+/// return [`Result`].
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+/// use std::fs::File;
+/// use std::io::prelude::*;
+///
+/// enum MyError {
+/// FileWriteError
+/// }
+///
+/// impl From<io::Error> for MyError {
+/// fn from(e: io::Error) -> MyError {
+/// MyError::FileWriteError
+/// }
+/// }
+///
+/// // The preferred method of quick returning Errors
+/// fn write_to_file_question() -> Result<(), MyError> {
+/// let mut file = File::create("my_best_friends.txt")?;
+/// file.write_all(b"This is a list of my best friends.")?;
+/// Ok(())
+/// }
+///
+/// // The previous method of quick returning Errors
+/// fn write_to_file_using_try() -> Result<(), MyError> {
+/// let mut file = r#try!(File::create("my_best_friends.txt"));
+/// r#try!(file.write_all(b"This is a list of my best friends."));
+/// Ok(())
+/// }
+///
+/// // This is equivalent to:
+/// fn write_to_file_using_match() -> Result<(), MyError> {
+/// let mut file = r#try!(File::create("my_best_friends.txt"));
+/// match file.write_all(b"This is a list of my best friends.") {
+/// Ok(v) => v,
+/// Err(e) => return Err(From::from(e)),
+/// }
+/// Ok(())
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(since = "1.39.0", reason = "use the `?` operator instead")]
+#[doc(alias = "?")]
+macro_rules! r#try {
+ // Fully qualified `$crate::...` paths keep the expansion independent of
+ // whatever `Result`/`From` names are in scope at the call site.
+ ($expr:expr $(,)?) => {
+ match $expr {
+ $crate::result::Result::Ok(val) => val,
+ // On `Err`, convert via `From` and early-return from the caller.
+ $crate::result::Result::Err(err) => {
+ return $crate::result::Result::Err($crate::convert::From::from(err));
+ }
+ }
+ };
+}
+
+/// Writes formatted data into a buffer.
+///
+/// This macro accepts a 'writer', a format string, and a list of arguments. Arguments will be
+/// formatted according to the specified format string and the result will be passed to the writer.
+/// The writer may be any value with a `write_fmt` method; generally this comes from an
+/// implementation of either the [`fmt::Write`] or the [`io::Write`] trait. The macro
+/// returns whatever the `write_fmt` method returns; commonly a [`fmt::Result`], or an
+/// [`io::Result`].
+///
+/// See [`std::fmt`] for more information on the format string syntax.
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+/// [`fmt::Write`]: crate::fmt::Write
+/// [`io::Write`]: ../std/io/trait.Write.html
+/// [`fmt::Result`]: crate::fmt::Result
+/// [`io::Result`]: ../std/io/type.Result.html
+///
+/// # Examples
+///
+/// ```
+/// use std::io::Write;
+///
+/// fn main() -> std::io::Result<()> {
+/// let mut w = Vec::new();
+/// write!(&mut w, "test")?;
+/// write!(&mut w, "formatted {}", "arguments")?;
+///
+/// assert_eq!(w, b"testformatted arguments");
+/// Ok(())
+/// }
+/// ```
+///
+/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
+/// implementing either, as objects do not typically implement both. However, the module must
+/// import the traits qualified so their names do not conflict:
+///
+/// ```
+/// use std::fmt::Write as FmtWrite;
+/// use std::io::Write as IoWrite;
+///
+/// fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// let mut s = String::new();
+/// let mut v = Vec::new();
+///
+/// write!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
+/// write!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
+/// assert_eq!(v, b"s = \"abc 123\"");
+/// Ok(())
+/// }
+/// ```
+///
+/// Note: This macro can be used in `no_std` setups as well.
+/// In a `no_std` setup you are responsible for the implementation details of the components.
+///
+/// ```no_run
+/// # extern crate core;
+/// use core::fmt::Write;
+///
+/// struct Example;
+///
+/// impl Write for Example {
+/// fn write_str(&mut self, _s: &str) -> core::fmt::Result {
+/// unimplemented!();
+/// }
+/// }
+///
+/// let mut m = Example{};
+/// write!(&mut m, "Hello World").expect("Not written");
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! write {
+ // Delegates to whatever `write_fmt` method resolves on `$dst`, passing the
+ // `fmt::Arguments` value built by `format_args!`.
+ ($dst:expr, $($arg:tt)*) => ($dst.write_fmt($crate::format_args!($($arg)*)))
+}
+
+/// Write formatted data into a buffer, with a newline appended.
+///
+/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone
+/// (no additional CARRIAGE RETURN (`\r`/`U+000D`)).
+///
+/// For more information, see [`write!`]. For information on the format string syntax, see
+/// [`std::fmt`].
+///
+/// [`std::fmt`]: crate::fmt
+///
+/// # Examples
+///
+/// ```
+/// use std::io::{Write, Result};
+///
+/// fn main() -> Result<()> {
+/// let mut w = Vec::new();
+/// writeln!(&mut w)?;
+/// writeln!(&mut w, "test")?;
+/// writeln!(&mut w, "formatted {}", "arguments")?;
+///
+/// assert_eq!(&w[..], "\ntest\nformatted arguments\n".as_bytes());
+/// Ok(())
+/// }
+/// ```
+///
+/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects
+/// implementing either, as objects do not typically implement both. However, the module must
+/// import the traits qualified so their names do not conflict:
+///
+/// ```
+/// use std::fmt::Write as FmtWrite;
+/// use std::io::Write as IoWrite;
+///
+/// fn main() -> Result<(), Box<dyn std::error::Error>> {
+/// let mut s = String::new();
+/// let mut v = Vec::new();
+///
+/// writeln!(&mut s, "{} {}", "abc", 123)?; // uses fmt::Write::write_fmt
+/// writeln!(&mut v, "s = {:?}", s)?; // uses io::Write::write_fmt
+/// assert_eq!(v, b"s = \"abc 123\\n\"\n");
+/// Ok(())
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow_internal_unstable(format_args_nl)]
+macro_rules! writeln {
+ // No-argument form: write just the newline via `write!`.
+ ($dst:expr $(,)?) => (
+ $crate::write!($dst, "\n")
+ );
+ // Formatted form: `format_args_nl!` supplies the trailing newline.
+ ($dst:expr, $($arg:tt)*) => (
+ $dst.write_fmt($crate::format_args_nl!($($arg)*))
+ );
+}
+
+/// Indicates unreachable code.
+///
+/// This is useful any time that the compiler can't determine that some code is unreachable. For
+/// example:
+///
+/// * Match arms with guard conditions.
+/// * Loops that dynamically terminate.
+/// * Iterators that dynamically terminate.
+///
+/// If the determination that the code is unreachable proves incorrect, the
+/// program immediately terminates with a [`panic!`].
+///
+/// The unsafe counterpart of this macro is the [`unreachable_unchecked`] function, which
+/// will cause undefined behavior if the code is reached.
+///
+/// [`unreachable_unchecked`]: crate::hint::unreachable_unchecked
+///
+/// # Panics
+///
+/// This will always [`panic!`]
+///
+/// # Examples
+///
+/// Match arms:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// fn foo(x: Option<i32>) {
+/// match x {
+/// Some(n) if n >= 0 => println!("Some(Non-negative)"),
+/// Some(n) if n < 0 => println!("Some(Negative)"),
+/// Some(_) => unreachable!(), // compile error if commented out
+/// None => println!("None")
+/// }
+/// }
+/// ```
+///
+/// Iterators:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// fn divide_by_three(x: u32) -> u32 { // one of the poorest implementations of x/3
+/// for i in 0.. {
+/// if 3*i < i { panic!("u32 overflow"); }
+/// if x < 3*i { return i-1; }
+/// }
+/// unreachable!();
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! unreachable {
+ // No-argument form: fixed "internal error" message.
+ () => ({
+ panic!("internal error: entered unreachable code")
+ });
+ // Single message: re-enter the macro through the format-string arm.
+ ($msg:expr $(,)?) => ({
+ $crate::unreachable!("{}", $msg)
+ });
+ // Format string plus arguments: prepend the standard prefix at expansion
+ // time via `concat!`, then panic with the combined format string.
+ ($fmt:expr, $($arg:tt)*) => ({
+ panic!($crate::concat!("internal error: entered unreachable code: ", $fmt), $($arg)*)
+ });
+}
+
+/// Indicates unimplemented code by panicking with a message of "not implemented".
+///
+/// This allows your code to type-check, which is useful if you are prototyping or
+/// implementing a trait that requires multiple methods which you don't plan on using all of.
+///
+/// The difference between `unimplemented!` and [`todo!`](macro.todo.html) is that while `todo!`
+/// conveys an intent of implementing the functionality later and the message is "not yet
+/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
+/// Also some IDEs will mark `todo!`s.
+///
+/// # Panics
+///
+/// This will always [panic!](macro.panic.html) because `unimplemented!` is just a
+/// shorthand for `panic!` with a fixed, specific message.
+///
+/// Like `panic!`, this macro has a second form for displaying custom values.
+///
+/// # Examples
+///
+/// Say we have a trait `Foo`:
+///
+/// ```
+/// trait Foo {
+/// fn bar(&self) -> u8;
+/// fn baz(&self);
+/// fn qux(&self) -> Result<u64, ()>;
+/// }
+/// ```
+///
+/// We want to implement `Foo` for 'MyStruct', but for some reason it only makes sense
+/// to implement the `bar()` function. `baz()` and `qux()` will still need to be defined
+/// in our implementation of `Foo`, but we can use `unimplemented!` in their definitions
+/// to allow our code to compile.
+///
+/// We still want to have our program stop running if the unimplemented methods are
+/// reached.
+///
+/// ```
+/// # trait Foo {
+/// # fn bar(&self) -> u8;
+/// # fn baz(&self);
+/// # fn qux(&self) -> Result<u64, ()>;
+/// # }
+/// struct MyStruct;
+///
+/// impl Foo for MyStruct {
+/// fn bar(&self) -> u8 {
+/// 1 + 1
+/// }
+///
+/// fn baz(&self) {
+/// // It makes no sense to `baz` a `MyStruct`, so we have no logic here
+/// // at all.
+/// // This will display "thread 'main' panicked at 'not implemented'".
+/// unimplemented!();
+/// }
+///
+/// fn qux(&self) -> Result<u64, ()> {
+/// // We have some logic here,
+/// // We can add a message to unimplemented! to display our omission.
+/// // This will display:
+/// // "thread 'main' panicked at 'not implemented: MyStruct isn't quxable'".
+/// unimplemented!("MyStruct isn't quxable");
+/// }
+/// }
+///
+/// fn main() {
+/// let s = MyStruct;
+/// s.bar();
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+macro_rules! unimplemented {
+ // No-argument form: fixed message.
+ () => (panic!("not implemented"));
+ // With arguments: append a formatted explanation to the fixed prefix.
+ ($($arg:tt)+) => (panic!("not implemented: {}", $crate::format_args!($($arg)+)));
+}
+
+/// Indicates unfinished code.
+///
+/// This can be useful if you are prototyping and are just looking to have your
+/// code typecheck.
+///
+/// The difference between [`unimplemented!`] and `todo!` is that while `todo!` conveys
+/// an intent of implementing the functionality later and the message is "not yet
+/// implemented", `unimplemented!` makes no such claims. Its message is "not implemented".
+/// Also some IDEs will mark `todo!`s.
+///
+/// # Panics
+///
+/// This will always [panic!](macro.panic.html)
+///
+/// # Examples
+///
+/// Here's an example of some in-progress code. We have a trait `Foo`:
+///
+/// ```
+/// trait Foo {
+/// fn bar(&self);
+/// fn baz(&self);
+/// }
+/// ```
+///
+/// We want to implement `Foo` on one of our types, but we also want to work on
+/// just `bar()` first. In order for our code to compile, we need to implement
+/// `baz()`, so we can use `todo!`:
+///
+/// ```
+/// # trait Foo {
+/// # fn bar(&self);
+/// # fn baz(&self);
+/// # }
+/// struct MyStruct;
+///
+/// impl Foo for MyStruct {
+/// fn bar(&self) {
+/// // implementation goes here
+/// }
+///
+/// fn baz(&self) {
+/// // let's not worry about implementing baz() for now
+/// todo!();
+/// }
+/// }
+///
+/// fn main() {
+/// let s = MyStruct;
+/// s.bar();
+///
+/// // we aren't even using baz(), so this is fine.
+/// }
+/// ```
+#[macro_export]
+#[stable(feature = "todo_macro", since = "1.40.0")]
+macro_rules! todo {
+ // No-argument form: fixed message.
+ () => (panic!("not yet implemented"));
+ // With arguments: append a formatted explanation to the fixed prefix.
+ ($($arg:tt)+) => (panic!("not yet implemented: {}", $crate::format_args!($($arg)+)));
+}
+
+/// Definitions of built-in macros.
+///
+/// Most of the macro properties (stability, visibility, etc.) are taken from the source code here,
+/// with exception of expansion functions transforming macro inputs into outputs,
+/// those functions are provided by the compiler.
+pub(crate) mod builtin {
+
+ /// Causes compilation to fail with the given error message when encountered.
+ ///
+ /// This macro should be used when a crate uses a conditional compilation strategy to provide
+ /// better error messages for erroneous conditions. It's the compiler-level form of [`panic!`],
+ /// but emits an error during *compilation* rather than at *runtime*.
+ ///
+ /// # Examples
+ ///
+ /// Two such examples are macros and `#[cfg]` environments.
+ ///
+ /// Emit better compiler error if a macro is passed invalid values. Without the final branch,
+ /// the compiler would still emit an error, but the error's message would not mention the two
+ /// valid values.
+ ///
+ /// ```compile_fail
+ /// macro_rules! give_me_foo_or_bar {
+ /// (foo) => {};
+ /// (bar) => {};
+ /// ($x:ident) => {
+ /// compile_error!("This macro only accepts `foo` or `bar`");
+ /// }
+ /// }
+ ///
+ /// give_me_foo_or_bar!(neither);
+ /// // ^ will fail at compile time with message "This macro only accepts `foo` or `bar`"
+ /// ```
+ ///
+ /// Emit compiler error if one of a number of features isn't available.
+ ///
+ /// ```compile_fail
+ /// #[cfg(not(any(feature = "foo", feature = "bar")))]
+ /// compile_error!("Either feature \"foo\" or \"bar\" must be enabled for this crate.");
+ /// ```
+ #[stable(feature = "compile_error_macro", since = "1.20.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! compile_error {
+ ($msg:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Constructs parameters for the other string-formatting macros.
+ ///
+ /// This macro functions by taking a formatting string literal containing
+ /// `{}` for each additional argument passed. `format_args!` prepares the
+ /// additional parameters to ensure the output can be interpreted as a string
+ /// and canonicalizes the arguments into a single type. Any value that implements
+ /// the [`Display`] trait can be passed to `format_args!`, as can any
+ /// [`Debug`] implementation be passed to a `{:?}` within the formatting string.
+ ///
+ /// This macro produces a value of type [`fmt::Arguments`]. This value can be
+ /// passed to the macros within [`std::fmt`] for performing useful redirection.
+ /// All other formatting macros ([`format!`], [`write!`], [`println!`], etc) are
+ /// proxied through this one. `format_args!`, unlike its derived macros, avoids
+ /// heap allocations.
+ ///
+ /// You can use the [`fmt::Arguments`] value that `format_args!` returns
+ /// in `Debug` and `Display` contexts as seen below. The example also shows
+ /// that `Debug` and `Display` format to the same thing: the interpolated
+ /// format string in `format_args!`.
+ ///
+ /// ```rust
+ /// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2));
+ /// let display = format!("{}", format_args!("{} foo {:?}", 1, 2));
+ /// assert_eq!("1 foo 2", display);
+ /// assert_eq!(display, debug);
+ /// ```
+ ///
+ /// For more information, see the documentation in [`std::fmt`].
+ ///
+ /// [`Display`]: crate::fmt::Display
+ /// [`Debug`]: crate::fmt::Debug
+ /// [`fmt::Arguments`]: crate::fmt::Arguments
+ /// [`std::fmt`]: crate::fmt
+ /// [`format!`]: ../std/macro.format.html
+ /// [`println!`]: ../std/macro.println.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::fmt;
+ ///
+ /// let s = fmt::format(format_args!("hello {}", "world"));
+ /// assert_eq!(s, format!("hello {}", "world"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(fmt_internals)]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! format_args {
+ ($fmt:expr) => {{ /* compiler built-in */ }};
+ ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
+ }
+
+ /// Same as `format_args`, but adds a newline in the end.
+ #[unstable(
+ feature = "format_args_nl",
+ issue = "none",
+ reason = "`format_args_nl` is only for internal \
+ language use and is subject to change"
+ )]
+ #[allow_internal_unstable(fmt_internals)]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! format_args_nl {
+ ($fmt:expr) => {{ /* compiler built-in */ }};
+ ($fmt:expr, $($args:tt)*) => {{ /* compiler built-in */ }};
+ }
+
+ /// Inspects an environment variable at compile time.
+ ///
+ /// This macro will expand to the value of the named environment variable at
+ /// compile time, yielding an expression of type `&'static str`.
+ ///
+ /// If the environment variable is not defined, then a compilation error
+ /// will be emitted. To not emit a compile error, use the [`option_env!`]
+ /// macro instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let path: &'static str = env!("PATH");
+ /// println!("the $PATH variable at the time of compiling was: {}", path);
+ /// ```
+ ///
+ /// You can customize the error message by passing a string as the second
+ /// parameter:
+ ///
+ /// ```compile_fail
+ /// let doc: &'static str = env!("documentation", "what's that?!");
+ /// ```
+ ///
+ /// If the `documentation` environment variable is not defined, you'll get
+ /// the following error:
+ ///
+ /// ```text
+ /// error: what's that?!
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! env {
+ ($name:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Optionally inspects an environment variable at compile time.
+ ///
+ /// If the named environment variable is present at compile time, this will
+ /// expand into an expression of type `Option<&'static str>` whose value is
+ /// `Some` of the value of the environment variable. If the environment
+ /// variable is not present, then this will expand to `None`. See
+ /// [`Option<T>`][Option] for more information on this type.
+ ///
+ /// A compile time error is never emitted when using this macro regardless
+ /// of whether the environment variable is present or not.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let key: Option<&'static str> = option_env!("SECRET_KEY");
+ /// println!("the secret key might be: {:?}", key);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! option_env {
+ ($name:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Concatenates identifiers into one identifier.
+ ///
+ /// This macro takes any number of comma-separated identifiers, and
+ /// concatenates them all into one, yielding an expression which is a new
+ /// identifier. Note that hygiene makes it such that this macro cannot
+ /// capture local variables. Also, as a general rule, macros are only
+ /// allowed in item, statement or expression position. That means while
+ /// you may use this macro for referring to existing variables, functions or
+ /// modules etc., you cannot define a new one with it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(concat_idents)]
+ ///
+ /// # fn main() {
+ /// fn foobar() -> u32 { 23 }
+ ///
+ /// let f = concat_idents!(foo, bar);
+ /// println!("{}", f());
+ ///
+ /// // fn concat_idents!(new, fun, name) { } // not usable in this way!
+ /// # }
+ /// ```
+ #[unstable(
+ feature = "concat_idents",
+ issue = "29599",
+ reason = "`concat_idents` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! concat_idents {
+ ($($e:ident),+ $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Concatenates literals into a static string slice.
+ ///
+ /// This macro takes any number of comma-separated literals, yielding an
+ /// expression of type `&'static str` which represents all of the literals
+ /// concatenated left-to-right.
+ ///
+ /// Integer and floating-point literals are stringified in order to be
+ /// concatenated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = concat!("test", 10, 'b', true);
+ /// assert_eq!(s, "test10btrue");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! concat {
+ ($($e:expr),* $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Expands to the line number on which it was invoked.
+ ///
+ /// With [`column!`] and [`file!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `u32` and is 1-based, so the first line
+ /// in each file evaluates to 1, the second to 2, etc. This is consistent
+ /// with error messages by common compilers or popular editors.
+ /// The returned line is *not necessarily* the line of the `line!` invocation itself,
+ /// but rather that of the first macro invocation leading up to the invocation
+ /// of the `line!` macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let current_line = line!();
+ /// println!("defined on line: {}", current_line);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! line {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Expands to the column number at which it was invoked.
+ ///
+ /// With [`line!`] and [`file!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `u32` and is 1-based, so the first column
+ /// in each line evaluates to 1, the second to 2, etc. This is consistent
+ /// with error messages by common compilers or popular editors.
+ /// The returned column is *not necessarily* the column of the `column!` invocation itself,
+ /// but rather that of the first macro invocation leading up to the invocation
+ /// of the `column!` macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let current_col = column!();
+ /// println!("defined on column: {}", current_col);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! column {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Expands to the file name in which it was invoked.
+ ///
+ /// With [`line!`] and [`column!`], these macros provide debugging information for
+ /// developers about the location within the source.
+ ///
+ /// The expanded expression has type `&'static str`, and the returned file
+ /// is not the invocation of the `file!` macro itself, but rather that of the
+ /// first macro invocation leading up to the invocation of the `file!`
+ /// macro.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let this_file = file!();
+ /// println!("defined in file: {}", this_file);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! file {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Stringifies its arguments.
+ ///
+ /// This macro will yield an expression of type `&'static str` which is the
+ /// stringification of all the tokens passed to the macro. No restrictions
+ /// are placed on the syntax of the macro invocation itself.
+ ///
+ /// Note that the expanded results of the input tokens may change in the
+ /// future. You should be careful if you rely on the output.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let one_plus_one = stringify!(1 + 1);
+ /// assert_eq!(one_plus_one, "1 + 1");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! stringify {
+ ($($t:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Includes a UTF-8 encoded file as a string.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// This macro will yield an expression of type `&'static str` which is the
+ /// contents of the file.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'spanish.in':
+ ///
+ /// ```text
+ /// adiós
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let my_str = include_str!("spanish.in");
+ /// assert_eq!(my_str, "adiós\n");
+ /// print!("{}", my_str);
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print "adiós".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! include_str {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Includes a file as a reference to a byte array.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// This macro will yield an expression of type `&'static [u8; N]` which is
+ /// the contents of the file.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'spanish.in':
+ ///
+ /// ```text
+ /// adiós
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let bytes = include_bytes!("spanish.in");
+ /// assert_eq!(bytes, b"adi\xc3\xb3s\n");
+ /// print!("{}", String::from_utf8_lossy(bytes));
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print "adiós".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! include_bytes {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Expands to a string that represents the current module path.
+ ///
+ /// The current module path can be thought of as the hierarchy of modules
+ /// leading back up to the crate root. The first component of the path
+ /// returned is the name of the crate currently being compiled.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// mod test {
+ /// pub fn foo() {
+ /// assert!(module_path!().ends_with("test"));
+ /// }
+ /// }
+ ///
+ /// test::foo();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! module_path {
+ () => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Evaluates boolean combinations of configuration flags at compile time.
+ ///
+ /// In addition to the `#[cfg]` attribute, this macro is provided to allow
+ /// boolean expression evaluation of configuration flags. This frequently
+ /// leads to less duplicated code.
+ ///
+ /// The syntax given to this macro is the same syntax as the [`cfg`]
+ /// attribute.
+ ///
+ /// `cfg!`, unlike `#[cfg]`, does not remove any code and only evaluates to true or false. For
+ /// example, all blocks in an if/else expression need to be valid when `cfg!` is used for
+ /// the condition, regardless of what `cfg!` is evaluating.
+ ///
+ /// [`cfg`]: ../reference/conditional-compilation.html#the-cfg-attribute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let my_directory = if cfg!(windows) {
+ /// "windows-specific-directory"
+ /// } else {
+ /// "unix-directory"
+ /// };
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! cfg {
+ ($($cfg:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Parses a file as an expression or an item according to the context.
+ ///
+ /// The file is located relative to the current file (similarly to how
+ /// modules are found). The provided path is interpreted in a platform-specific
+ /// way at compile time. So, for instance, an invocation with a Windows path
+ /// containing backslashes `\` would not compile correctly on Unix.
+ ///
+ /// Using this macro is often a bad idea, because if the file is
+ /// parsed as an expression, it is going to be placed in the
+ /// surrounding code unhygienically. This could result in variables
+ /// or functions being different from what the file expected if
+ /// there are variables or functions that have the same name in
+ /// the current file.
+ ///
+ /// # Examples
+ ///
+ /// Assume there are two files in the same directory with the following
+ /// contents:
+ ///
+ /// File 'monkeys.in':
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// ['🙈', '🙊', '🙉']
+ /// .iter()
+ /// .cycle()
+ /// .take(6)
+ /// .collect::<String>()
+ /// ```
+ ///
+ /// File 'main.rs':
+ ///
+ /// ```ignore (cannot-doctest-external-file-dependency)
+ /// fn main() {
+ /// let my_string = include!("monkeys.in");
+ /// assert_eq!("🙈🙊🙉🙈🙊🙉", my_string);
+ /// println!("{}", my_string);
+ /// }
+ /// ```
+ ///
+ /// Compiling 'main.rs' and running the resulting binary will print
+ /// "🙈🙊🙉🙈🙊🙉".
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! include {
+ ($file:expr $(,)?) => {{ /* compiler built-in */ }};
+ }
+
+ /// Asserts that a boolean expression is `true` at runtime.
+ ///
+ /// This will invoke the [`panic!`] macro if the provided expression cannot be
+ /// evaluated to `true` at runtime.
+ ///
+ /// # Uses
+ ///
+ /// Assertions are always checked in both debug and release builds, and cannot
+ /// be disabled. See [`debug_assert!`] for assertions that are not enabled in
+ /// release builds by default.
+ ///
+ /// Unsafe code may rely on `assert!` to enforce run-time invariants that, if
+ /// violated, could lead to unsafety.
+ ///
+ /// Other use-cases of `assert!` include testing and enforcing run-time
+ /// invariants in safe code (whose violation cannot result in unsafety).
+ ///
+ /// # Custom Messages
+ ///
+ /// This macro has a second form, where a custom panic message can
+ /// be provided with or without arguments for formatting. See [`std::fmt`]
+ /// for syntax for this form.
+ ///
+ /// [`std::fmt`]: crate::fmt
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // the panic message for these assertions is the stringified value of the
+ /// // expression given.
+ /// assert!(true);
+ ///
+ /// fn some_computation() -> bool { true } // a very simple function
+ ///
+ /// assert!(some_computation());
+ ///
+ /// // assert with a custom message
+ /// let x = true;
+ /// assert!(x, "x wasn't true!");
+ ///
+ /// let a = 3; let b = 27;
+ /// assert!(a + b == 30, "a = {}, b = {}", a, b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! assert {
+ ($cond:expr $(,)?) => {{ /* compiler built-in */ }};
+ ($cond:expr, $($arg:tt)+) => {{ /* compiler built-in */ }};
+ }
+
+ /// Inline assembly.
+ ///
+ /// Read the [unstable book] for the usage.
+ ///
+ /// [unstable book]: ../unstable-book/library-features/asm.html
+ #[unstable(
+ feature = "asm",
+ issue = "72016",
+ reason = "inline assembly is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! asm {
+ ("assembly template",
+ $(operands,)*
+ $(options($(option),*))?
+ ) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// LLVM-style inline assembly.
+ ///
+ /// Read the [unstable book] for the usage.
+ ///
+ /// [unstable book]: ../unstable-book/library-features/llvm-asm.html
+ #[unstable(
+ feature = "llvm_asm",
+ issue = "70173",
+ reason = "prefer using the new asm! syntax instead"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! llvm_asm {
+ ("assembly template"
+ : $("output"(operand),)*
+ : $("input"(operand),)*
+ : $("clobbers",)*
+ : $("options",)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Module-level inline assembly.
+ #[unstable(
+ feature = "global_asm",
+ issue = "35119",
+ reason = "`global_asm!` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! global_asm {
+ ("assembly") => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Prints the passed tokens to standard output.
+ #[unstable(
+ feature = "log_syntax",
+ issue = "29598",
+ reason = "`log_syntax!` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! log_syntax {
+ ($($arg:tt)*) => {
+ /* compiler built-in */
+ };
+ }
+
+ /// Enables or disables tracing functionality used for debugging other macros.
+ #[unstable(
+ feature = "trace_macros",
+ issue = "29598",
+ reason = "`trace_macros` is not stable enough for use and is subject to change"
+ )]
+ #[rustc_builtin_macro]
+ #[macro_export]
+ macro_rules! trace_macros {
+ (true) => {{ /* compiler built-in */ }};
+ (false) => {{ /* compiler built-in */ }};
+ }
+
+ /// Attribute macro applied to a function to turn it into a unit test.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro test($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Attribute macro applied to a function to turn it into a benchmark test.
+ #[unstable(
+ feature = "test",
+ issue = "50297",
+ soft,
+ reason = "`bench` is a part of custom test frameworks which are unstable"
+ )]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro bench($item:item) {
+ /* compiler built-in */
+ }
+
+ /// An implementation detail of the `#[test]` and `#[bench]` macros.
+ #[unstable(
+ feature = "custom_test_frameworks",
+ issue = "50297",
+ reason = "custom test frameworks are an unstable feature"
+ )]
+ #[allow_internal_unstable(test, rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro test_case($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Attribute macro applied to a static to register it as a global allocator.
+ ///
+ /// See also [`std::alloc::GlobalAlloc`](../std/alloc/trait.GlobalAlloc.html).
+ #[stable(feature = "global_allocator", since = "1.28.0")]
+ #[allow_internal_unstable(rustc_attrs)]
+ #[rustc_builtin_macro]
+ pub macro global_allocator($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Keeps the item it's applied to if the passed path is accessible, and removes it otherwise.
+ #[unstable(
+ feature = "cfg_accessible",
+ issue = "64797",
+ reason = "`cfg_accessible` is not fully implemented"
+ )]
+ #[rustc_builtin_macro]
+ pub macro cfg_accessible($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Unstable implementation detail of the `rustc` compiler, do not use.
+ #[rustc_builtin_macro]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(core_intrinsics, libstd_sys_internals)]
+ pub macro RustcDecodable($item:item) {
+ /* compiler built-in */
+ }
+
+ /// Unstable implementation detail of the `rustc` compiler, do not use.
+ #[rustc_builtin_macro]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow_internal_unstable(core_intrinsics)]
+ pub macro RustcEncodable($item:item) {
+ /* compiler built-in */
+ }
+}
--- /dev/null
+Panics the current thread.
+
+This allows a program to terminate immediately and provide feedback
+to the caller of the program. `panic!` should be used when a program reaches
+an unrecoverable state.
+
+This macro is the perfect way to assert conditions in example code and in
+tests. `panic!` is closely tied with the `unwrap` method of both
+[`Option`][ounwrap] and [`Result`][runwrap] enums. Both implementations call
+`panic!` when they are set to [`None`] or [`Err`] variants.
+
+This macro is used to inject panic into a Rust thread, causing the thread to
+panic entirely. Each thread's panic can be reaped as the [`Box`]`<`[`Any`]`>` type,
+and the single-argument form of the `panic!` macro will be the value which
+is transmitted.
+
+The [`Result`] enum is often a better solution for recovering from errors than
+using the `panic!` macro. This macro should be used to avoid proceeding using
+incorrect values, such as from external sources. Detailed information about
+error handling is found in the [book].
+
+The multi-argument form of this macro panics with a string and has the
+[`format!`] syntax for building a string.
+
+See also the macro [`compile_error!`], for raising errors during compilation.
+
+[ounwrap]: Option::unwrap
+[runwrap]: Result::unwrap
+[`Box`]: ../std/boxed/struct.Box.html
+[`Any`]: crate::any::Any
+[`format!`]: ../std/macro.format.html
+[book]: ../book/ch09-00-error-handling.html
+
+# Current implementation
+
+If the main thread panics, it will terminate all your threads and end your
+program with code `101`.
+
+# Examples
+
+```should_panic
+# #![allow(unreachable_code)]
+panic!();
+panic!("this is a terrible mistake!");
+panic!(4); // panic with the value of 4 to be collected elsewhere
+panic!("this is a {} {message}", "fancy", message = "message");
+```
--- /dev/null
+//! Primitive traits and types representing basic properties of types.
+//!
+//! Rust types can be classified in various useful ways according to
+//! their intrinsic properties. These classifications are represented
+//! as traits.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cell::UnsafeCell;
+use crate::cmp;
+use crate::fmt::Debug;
+use crate::hash::Hash;
+use crate::hash::Hasher;
+
+/// Types that can be transferred across thread boundaries.
+///
+/// This trait is automatically implemented when the compiler determines it's
+/// appropriate.
+///
+/// An example of a non-`Send` type is the reference-counting pointer
+/// [`rc::Rc`][`Rc`]. If two threads attempt to clone [`Rc`]s that point to the same
+/// reference-counted value, they might try to update the reference count at the
+/// same time, which is [undefined behavior][ub] because [`Rc`] doesn't use atomic
+/// operations. Its cousin [`sync::Arc`][arc] does use atomic operations (incurring
+/// some overhead) and thus is `Send`.
+///
+/// See [the Nomicon](../../nomicon/send-and-sync.html) for more details.
+///
+/// [`Rc`]: ../../std/rc/struct.Rc.html
+/// [arc]: ../../std/sync/struct.Arc.html
+/// [ub]: ../../reference/behavior-considered-undefined.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "send_trait")]
+#[rustc_on_unimplemented(
+ message = "`{Self}` cannot be sent between threads safely",
+ label = "`{Self}` cannot be sent between threads safely"
+)]
+pub unsafe auto trait Send {
+ // Empty.
+}
+
+// Raw pointers are explicitly opted out of `Send`.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for *const T {}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !Send for *mut T {}
+
+/// Types with a constant size known at compile time.
+///
+/// All type parameters have an implicit bound of `Sized`. The special syntax
+/// `?Sized` can be used to remove this bound if it's not appropriate.
+///
+/// ```
+/// # #![allow(dead_code)]
+/// struct Foo<T>(T);
+/// struct Bar<T: ?Sized>(T);
+///
+/// // struct FooUse(Foo<[i32]>); // error: Sized is not implemented for [i32]
+/// struct BarUse(Bar<[i32]>); // OK
+/// ```
+///
+/// The one exception is the implicit `Self` type of a trait. A trait does not
+/// have an implicit `Sized` bound as this is incompatible with [trait object]s
+/// where, by definition, the trait needs to work with all possible implementors,
+/// and thus could be any size.
+///
+/// Although Rust will let you bind `Sized` to a trait, you won't
+/// be able to use it to form a trait object later:
+///
+/// ```
+/// # #![allow(unused_variables)]
+/// trait Foo { }
+/// trait Bar: Sized { }
+///
+/// struct Impl;
+/// impl Foo for Impl { }
+/// impl Bar for Impl { }
+///
+/// let x: &dyn Foo = &Impl; // OK
+/// // let y: &dyn Bar = &Impl; // error: the trait `Bar` cannot
+/// // be made into an object
+/// ```
+///
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "sized"]
+#[rustc_on_unimplemented(
+ message = "the size for values of type `{Self}` cannot be known at compilation time",
+ label = "doesn't have a size known at compile-time"
+)]
+#[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable
+#[rustc_specialization_trait]
+pub trait Sized {
+ // Empty.
+}
+
+/// Types that can be "unsized" to a dynamically-sized type.
+///
+/// For example, the sized array type `[i8; 2]` implements `Unsize<[i8]>` and
+/// `Unsize<dyn fmt::Debug>`.
+///
+/// All implementations of `Unsize` are provided automatically by the compiler.
+///
+/// `Unsize` is implemented for:
+///
+/// - `[T; N]` is `Unsize<[T]>`
+/// - `T` is `Unsize<dyn Trait>` when `T: Trait`
+/// - `Foo<..., T, ...>` is `Unsize<Foo<..., U, ...>>` if:
+/// - `T: Unsize<U>`
+/// - `Foo` is a struct
+/// - Only the last field of `Foo` has a type involving `T`
+/// - `T` is not part of the type of any other fields
+/// - `Bar<T>: Unsize<Bar<U>>`, if the last field of `Foo` has type `Bar<T>`
+///
+/// `Unsize` is used along with [`ops::CoerceUnsized`] to allow
+/// "user-defined" containers such as [`Rc`] to contain dynamically-sized
+/// types. See the [DST coercion RFC][RFC982] and [the nomicon entry on coercion][nomicon-coerce]
+/// for more details.
+///
+/// [`ops::CoerceUnsized`]: crate::ops::CoerceUnsized
+/// [`Rc`]: ../../std/rc/struct.Rc.html
+/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
+/// [nomicon-coerce]: ../../nomicon/coercions.html
+#[unstable(feature = "unsize", issue = "27732")]
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {
+ // Empty.
+}
+
+/// Required trait for constants used in pattern matches.
+///
+/// Any type that derives `PartialEq` automatically implements this trait,
+/// *regardless* of whether its type-parameters implement `Eq`.
+///
+/// If a `const` item contains some type that does not implement this trait,
+/// then that type either (1.) does not implement `PartialEq` (which means the
+/// constant will not provide that comparison method, which code generation
+/// assumes is available), or (2.) it implements *its own* version of
+/// `PartialEq` (which we assume does not conform to a structural-equality
+/// comparison).
+///
+/// In either of the two scenarios above, we reject usage of such a constant in
+/// a pattern match.
+///
+/// See also the [structural match RFC][RFC1445], and [issue 63438] which
+/// motivated migrating from attribute-based design to this trait.
+///
+/// [RFC1445]: https://github.com/rust-lang/rfcs/blob/master/text/1445-restrict-constants-in-patterns.md
+/// [issue 63438]: https://github.com/rust-lang/rust/issues/63438
+#[unstable(feature = "structural_match", issue = "31434")]
+#[rustc_on_unimplemented(message = "the type `{Self}` does not `#[derive(PartialEq)]`")]
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {
+ // Empty.
+}
+
+/// Required trait for constants used in pattern matches.
+///
+/// Any type that derives `Eq` automatically implements this trait, *regardless*
+/// of whether its type-parameters implement `Eq`.
+///
+/// This is a hack to work around a limitation in our type-system.
+///
+/// Background:
+///
+/// We want to require that types of consts used in pattern matches
+/// have the attribute `#[derive(PartialEq, Eq)]`.
+///
+/// In a more ideal world, we could check that requirement by just checking that
+/// the given type implements both (1.) the `StructuralPartialEq` trait *and*
+/// (2.) the `Eq` trait. However, you can have ADTs that *do* `derive(PartialEq, Eq)`,
+/// and be a case that we want the compiler to accept, and yet the constant's
+/// type fails to implement `Eq`.
+///
+/// Namely, a case like this:
+///
+/// ```rust
+/// #[derive(PartialEq, Eq)]
+/// struct Wrap<X>(X);
+/// fn higher_order(_: &()) { }
+/// const CFN: Wrap<fn(&())> = Wrap(higher_order);
+/// fn main() {
+/// match CFN {
+/// CFN => {}
+/// _ => {}
+/// }
+/// }
+/// ```
+///
+/// (The problem in the above code is that `Wrap<fn(&())>` does not implement
+/// `PartialEq`, nor `Eq`, because `for<'a> fn(&'a _)` does not implement those
+/// traits.)
+///
+/// Therefore, we cannot rely on naive check for `StructuralPartialEq` and
+/// mere `Eq`.
+///
+/// As a hack to work around this, we use two separate traits injected by each
+/// of the two derives (`#[derive(PartialEq)]` and `#[derive(Eq)]`) and check
+/// that both of them are present as part of structural-match checking.
+#[unstable(feature = "structural_match", issue = "31434")]
+#[rustc_on_unimplemented(message = "the type `{Self}` does not `#[derive(Eq)]`")]
+#[lang = "structural_teq"]
+pub trait StructuralEq {
+ // Empty.
+}
+
+/// Types whose values can be duplicated simply by copying bits.
+///
+/// By default, variable bindings have 'move semantics.' In other
+/// words:
+///
+/// ```
+/// #[derive(Debug)]
+/// struct Foo;
+///
+/// let x = Foo;
+///
+/// let y = x;
+///
+/// // `x` has moved into `y`, and so cannot be used
+///
+/// // println!("{:?}", x); // error: use of moved value
+/// ```
+///
+/// However, if a type implements `Copy`, it instead has 'copy semantics':
+///
+/// ```
+/// // We can derive a `Copy` implementation. `Clone` is also required, as it's
+/// // a supertrait of `Copy`.
+/// #[derive(Debug, Copy, Clone)]
+/// struct Foo;
+///
+/// let x = Foo;
+///
+/// let y = x;
+///
+/// // `y` is a copy of `x`
+///
+/// println!("{:?}", x); // A-OK!
+/// ```
+///
+/// It's important to note that in these two examples, the only difference is whether you
+/// are allowed to access `x` after the assignment. Under the hood, both a copy and a move
+/// can result in bits being copied in memory, although this is sometimes optimized away.
+///
+/// ## How can I implement `Copy`?
+///
+/// There are two ways to implement `Copy` on your type. The simplest is to use `derive`:
+///
+/// ```
+/// #[derive(Copy, Clone)]
+/// struct MyStruct;
+/// ```
+///
+/// You can also implement `Copy` and `Clone` manually:
+///
+/// ```
+/// struct MyStruct;
+///
+/// impl Copy for MyStruct { }
+///
+/// impl Clone for MyStruct {
+/// fn clone(&self) -> MyStruct {
+/// *self
+/// }
+/// }
+/// ```
+///
+/// There is a small difference between the two: the `derive` strategy will also place a `Copy`
+/// bound on type parameters, which isn't always desired.
+///
+/// ## What's the difference between `Copy` and `Clone`?
+///
+/// Copies happen implicitly, for example as part of an assignment `y = x`. The behavior of
+/// `Copy` is not overloadable; it is always a simple bit-wise copy.
+///
+/// Cloning is an explicit action, `x.clone()`. The implementation of [`Clone`] can
+/// provide any type-specific behavior necessary to duplicate values safely. For example,
+/// the implementation of [`Clone`] for [`String`] needs to copy the pointed-to string
+/// buffer in the heap. A simple bitwise copy of [`String`] values would merely copy the
+/// pointer, leading to a double free down the line. For this reason, [`String`] is [`Clone`]
+/// but not `Copy`.
+///
+/// [`Clone`] is a supertrait of `Copy`, so everything which is `Copy` must also implement
+/// [`Clone`]. If a type is `Copy` then its [`Clone`] implementation only needs to return `*self`
+/// (see the example above).
+///
+/// ## When can my type be `Copy`?
+///
+/// A type can implement `Copy` if all of its components implement `Copy`. For example, this
+/// struct can be `Copy`:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// #[derive(Copy, Clone)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+/// ```
+///
+/// A struct can be `Copy`, and [`i32`] is `Copy`, therefore `Point` is eligible to be `Copy`.
+/// By contrast, consider
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # struct Point;
+/// struct PointList {
+/// points: Vec<Point>,
+/// }
+/// ```
+///
+/// The struct `PointList` cannot implement `Copy`, because [`Vec<T>`] is not `Copy`. If we
+/// attempt to derive a `Copy` implementation, we'll get an error:
+///
+/// ```text
+/// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy`
+/// ```
+///
+/// Shared references (`&T`) are also `Copy`, so a type can be `Copy`, even when it holds
+/// shared references of types `T` that are *not* `Copy`. Consider the following struct,
+/// which can implement `Copy`, because it only holds a *shared reference* to our non-`Copy`
+/// type `PointList` from above:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # struct PointList;
+/// #[derive(Copy, Clone)]
+/// struct PointListWrapper<'a> {
+/// point_list_ref: &'a PointList,
+/// }
+/// ```
+///
+/// ## When *can't* my type be `Copy`?
+///
+/// Some types can't be copied safely. For example, copying `&mut T` would create an aliased
+/// mutable reference. Copying [`String`] would duplicate responsibility for managing the
+/// [`String`]'s buffer, leading to a double free.
+///
+/// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's
+/// managing some resource besides its own [`size_of::<T>`] bytes.
+///
+/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get
+/// the error [E0204].
+///
+/// [E0204]: ../../error-index.html#E0204
+///
+/// ## When *should* my type be `Copy`?
+///
+/// Generally speaking, if your type _can_ implement `Copy`, it should. Keep in mind, though,
+/// that implementing `Copy` is part of the public API of your type. If the type might become
+/// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to
+/// avoid a breaking API change.
+///
+/// ## Additional implementors
+///
+/// In addition to the [implementors listed below][impls],
+/// the following types also implement `Copy`:
+///
+/// * Function item types (i.e., the distinct types defined for each function)
+/// * Function pointer types (e.g., `fn() -> i32`)
+/// * Array types, for all sizes, if the item type also implements `Copy` (e.g., `[i32; 123456]`)
+/// * Tuple types, if each component also implements `Copy` (e.g., `()`, `(i32, bool)`)
+/// * Closure types, if they capture no value from the environment
+/// or if all such captured values implement `Copy` themselves.
+/// Note that variables captured by shared reference always implement `Copy`
+/// (even if the referent doesn't),
+/// while variables captured by mutable reference never implement `Copy`.
+///
+/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
+/// [`String`]: ../../std/string/struct.String.html
+/// [`size_of::<T>`]: crate::mem::size_of
+/// [impls]: #implementors
+#[stable(feature = "rust1", since = "1.0.0")]
+#[lang = "copy"]
+// FIXME(matthewjasper) This allows copying a type that doesn't implement
+// `Copy` because of unsatisfied lifetime bounds (copying `A<'_>` when only
+// `A<'static>: Copy` and `A<'_>: Clone`).
+// We have this attribute here for now only because there are quite a few
+// existing specializations on `Copy` that already exist in the standard
+// library, and there's no way to safely have this behavior right now.
+#[rustc_unsafe_specialization_marker]
+pub trait Copy: Clone {
+ // Empty.
+}
+
/// Derive macro generating an impl of the trait `Copy`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
#[allow_internal_unstable(core_intrinsics, derive_clone_copy)]
pub macro Copy($item:item) {
    /* compiler built-in: the expansion is produced directly by the compiler
     * (`rustc_builtin_macro`), not by this body. */
}
+
+/// Types for which it is safe to share references between threads.
+///
+/// This trait is automatically implemented when the compiler determines
+/// it's appropriate.
+///
+/// The precise definition is: a type `T` is [`Sync`] if and only if `&T` is
+/// [`Send`]. In other words, if there is no possibility of
+/// [undefined behavior][ub] (including data races) when passing
+/// `&T` references between threads.
+///
+/// As one would expect, primitive types like [`u8`] and [`f64`]
+/// are all [`Sync`], and so are simple aggregate types containing them,
+/// like tuples, structs and enums. More examples of basic [`Sync`]
+/// types include "immutable" types like `&T`, and those with simple
+/// inherited mutability, such as [`Box<T>`][box], [`Vec<T>`][vec] and
+/// most other collection types. (Generic parameters need to be [`Sync`]
+/// for their container to be [`Sync`].)
+///
+/// A somewhat surprising consequence of the definition is that `&mut T`
+/// is `Sync` (if `T` is `Sync`) even though it seems like that might
+/// provide unsynchronized mutation. The trick is that a mutable
+/// reference behind a shared reference (that is, `& &mut T`)
+/// becomes read-only, as if it were a `& &T`. Hence there is no risk
+/// of a data race.
+///
+/// Types that are not `Sync` are those that have "interior
+/// mutability" in a non-thread-safe form, such as [`Cell`][cell]
+/// and [`RefCell`][refcell]. These types allow for mutation of
+/// their contents even through an immutable, shared reference. For
+/// example the `set` method on [`Cell<T>`][cell] takes `&self`, so it requires
+/// only a shared reference [`&Cell<T>`][cell]. The method performs no
+/// synchronization, thus [`Cell`][cell] cannot be `Sync`.
+///
+/// Another example of a non-`Sync` type is the reference-counting
+/// pointer [`Rc`][rc]. Given any reference [`&Rc<T>`][rc], you can clone
+/// a new [`Rc<T>`][rc], modifying the reference counts in a non-atomic way.
+///
+/// For cases when one does need thread-safe interior mutability,
+/// Rust provides [atomic data types], as well as explicit locking via
+/// [`sync::Mutex`][mutex] and [`sync::RwLock`][rwlock]. These types
+/// ensure that any mutation cannot cause data races, hence the types
+/// are `Sync`. Likewise, [`sync::Arc`][arc] provides a thread-safe
+/// analogue of [`Rc`][rc].
+///
+/// Any types with interior mutability must also use the
+/// [`cell::UnsafeCell`][unsafecell] wrapper around the value(s) which
/// can be mutated through a shared reference. Failing to do this is
+/// [undefined behavior][ub]. For example, [`transmute`][transmute]-ing
+/// from `&T` to `&mut T` is invalid.
+///
+/// See [the Nomicon][nomicon-send-and-sync] for more details about `Sync`.
+///
+/// [box]: ../../std/boxed/struct.Box.html
+/// [vec]: ../../std/vec/struct.Vec.html
+/// [cell]: crate::cell::Cell
+/// [refcell]: crate::cell::RefCell
+/// [rc]: ../../std/rc/struct.Rc.html
+/// [arc]: ../../std/sync/struct.Arc.html
+/// [atomic data types]: crate::sync::atomic
+/// [mutex]: ../../std/sync/struct.Mutex.html
+/// [rwlock]: ../../std/sync/struct.RwLock.html
+/// [unsafecell]: crate::cell::UnsafeCell
+/// [ub]: ../../reference/behavior-considered-undefined.html
+/// [transmute]: crate::mem::transmute
+/// [nomicon-send-and-sync]: ../../nomicon/send-and-sync.html
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "sync_trait")]
#[lang = "sync"]
#[rustc_on_unimplemented(
    message = "`{Self}` cannot be shared between threads safely",
    label = "`{Self}` cannot be shared between threads safely"
)]
pub unsafe auto trait Sync {
    // FIXME(estebank): once support to add notes in `rustc_on_unimplemented`
    // lands in beta, and it has been extended to check whether a closure is
    // anywhere in the requirement chain, extend it as such (#48534):
    // ```
    // on(
    //     closure,
    //     note="`{Self}` cannot be shared safely, consider marking the closure `move`"
    // ),
    // ```

    // Empty: `Sync` is a marker trait. As an `auto` trait it is implemented
    // automatically for any type whose components are all `Sync`; it is
    // `unsafe` because an incorrect manual impl permits data races.
}
+
// Raw pointers carry no synchronization guarantee about their pointee, so
// they are explicitly opted out of `Sync`. Wrapper types that are in fact
// thread-safe must assert it themselves with an `unsafe impl Sync`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !Sync for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !Sync for *mut T {}
+
// Generates the basic trait impls for a generic unit struct (used below for
// `PhantomData`). These are written by hand rather than `#[derive]`d because
// a derive would add a `T: Trait` bound to each impl; a `$t<T>` is zero-sized
// and stores no `T`, so every impl here holds for *all* `T: ?Sized`.
macro_rules! impls {
    ($t: ident) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> Hash for $t<T> {
            // Zero-sized: there are no bytes to feed to the hasher.
            #[inline]
            fn hash<H: Hasher>(&self, _: &mut H) {}
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> cmp::PartialEq for $t<T> {
            fn eq(&self, _other: &$t<T>) -> bool {
                // All values of a unit struct are indistinguishable.
                true
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> cmp::Eq for $t<T> {}

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> cmp::PartialOrd for $t<T> {
            fn partial_cmp(&self, _other: &$t<T>) -> Option<cmp::Ordering> {
                Option::Some(cmp::Ordering::Equal)
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> cmp::Ord for $t<T> {
            fn cmp(&self, _other: &$t<T>) -> cmp::Ordering {
                cmp::Ordering::Equal
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> Copy for $t<T> {}

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> Clone for $t<T> {
            fn clone(&self) -> Self {
                Self
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T: ?Sized> Default for $t<T> {
            fn default() -> Self {
                Self
            }
        }

        // Marker impls allowing values of the type to appear in patterns
        // (structural match).
        #[unstable(feature = "structural_match", issue = "31434")]
        impl<T: ?Sized> StructuralPartialEq for $t<T> {}

        #[unstable(feature = "structural_match", issue = "31434")]
        impl<T: ?Sized> StructuralEq for $t<T> {}
    };
}
+
+/// Zero-sized type used to mark things that "act like" they own a `T`.
+///
+/// Adding a `PhantomData<T>` field to your type tells the compiler that your
+/// type acts as though it stores a value of type `T`, even though it doesn't
+/// really. This information is used when computing certain safety properties.
+///
+/// For a more in-depth explanation of how to use `PhantomData<T>`, please see
+/// [the Nomicon](../../nomicon/phantom-data.html).
+///
+/// # A ghastly note 👻👻👻
+///
+/// Though they both have scary names, `PhantomData` and 'phantom types' are
+/// related, but not identical. A phantom type parameter is simply a type
+/// parameter which is never used. In Rust, this often causes the compiler to
+/// complain, and the solution is to add a "dummy" use by way of `PhantomData`.
+///
+/// # Examples
+///
+/// ## Unused lifetime parameters
+///
+/// Perhaps the most common use case for `PhantomData` is a struct that has an
+/// unused lifetime parameter, typically as part of some unsafe code. For
+/// example, here is a struct `Slice` that has two pointers of type `*const T`,
+/// presumably pointing into an array somewhere:
+///
+/// ```compile_fail,E0392
+/// struct Slice<'a, T> {
+/// start: *const T,
+/// end: *const T,
+/// }
+/// ```
+///
+/// The intention is that the underlying data is only valid for the
+/// lifetime `'a`, so `Slice` should not outlive `'a`. However, this
+/// intent is not expressed in the code, since there are no uses of
+/// the lifetime `'a` and hence it is not clear what data it applies
+/// to. We can correct this by telling the compiler to act *as if* the
+/// `Slice` struct contained a reference `&'a T`:
+///
+/// ```
+/// use std::marker::PhantomData;
+///
+/// # #[allow(dead_code)]
+/// struct Slice<'a, T: 'a> {
+/// start: *const T,
+/// end: *const T,
+/// phantom: PhantomData<&'a T>,
+/// }
+/// ```
+///
+/// This also in turn requires the annotation `T: 'a`, indicating
+/// that any references in `T` are valid over the lifetime `'a`.
+///
+/// When initializing a `Slice` you simply provide the value
+/// `PhantomData` for the field `phantom`:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # use std::marker::PhantomData;
+/// # struct Slice<'a, T: 'a> {
+/// # start: *const T,
+/// # end: *const T,
+/// # phantom: PhantomData<&'a T>,
+/// # }
+/// fn borrow_vec<T>(vec: &Vec<T>) -> Slice<'_, T> {
+/// let ptr = vec.as_ptr();
+/// Slice {
+/// start: ptr,
+/// end: unsafe { ptr.add(vec.len()) },
+/// phantom: PhantomData,
+/// }
+/// }
+/// ```
+///
+/// ## Unused type parameters
+///
+/// It sometimes happens that you have unused type parameters which
+/// indicate what type of data a struct is "tied" to, even though that
+/// data is not actually found in the struct itself. Here is an
+/// example where this arises with [FFI]. The foreign interface uses
+/// handles of type `*mut ()` to refer to Rust values of different
+/// types. We track the Rust type using a phantom type parameter on
+/// the struct `ExternalResource` which wraps a handle.
+///
+/// [FFI]: ../../book/ch19-01-unsafe-rust.html#using-extern-functions-to-call-external-code
+///
+/// ```
+/// # #![allow(dead_code)]
+/// # trait ResType { }
+/// # struct ParamType;
+/// # mod foreign_lib {
+/// # pub fn new(_: usize) -> *mut () { 42 as *mut () }
+/// # pub fn do_stuff(_: *mut (), _: usize) {}
+/// # }
+/// # fn convert_params(_: ParamType) -> usize { 42 }
+/// use std::marker::PhantomData;
+/// use std::mem;
+///
+/// struct ExternalResource<R> {
+/// resource_handle: *mut (),
+/// resource_type: PhantomData<R>,
+/// }
+///
+/// impl<R: ResType> ExternalResource<R> {
+/// fn new() -> Self {
+/// let size_of_res = mem::size_of::<R>();
+/// Self {
+/// resource_handle: foreign_lib::new(size_of_res),
+/// resource_type: PhantomData,
+/// }
+/// }
+///
+/// fn do_stuff(&self, param: ParamType) {
+/// let foreign_params = convert_params(param);
+/// foreign_lib::do_stuff(self.resource_handle, foreign_params);
+/// }
+/// }
+/// ```
+///
+/// ## Ownership and the drop check
+///
+/// Adding a field of type `PhantomData<T>` indicates that your
+/// type owns data of type `T`. This in turn implies that when your
+/// type is dropped, it may drop one or more instances of the type
+/// `T`. This has bearing on the Rust compiler's [drop check]
+/// analysis.
+///
+/// If your struct does not in fact *own* the data of type `T`, it is
+/// better to use a reference type, like `PhantomData<&'a T>`
+/// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so
+/// as not to indicate ownership.
+///
+/// [drop check]: ../../nomicon/dropck.html
#[lang = "phantom_data"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct PhantomData<T: ?Sized>;

// Generate the unconditional trait impls (Hash, PartialEq/Eq, PartialOrd/Ord,
// Copy/Clone, Default, structural-match markers) via the `impls!` macro above.
impls! { PhantomData }
+
mod impls {
    // Send-ness of references:
    // - `&T` may be sent to another thread only if the pointee may be
    //   *shared* across threads (`T: Sync`), since other copies of the
    //   reference can observe it concurrently.
    // - `&mut T` may be sent only if `T` itself is sendable (`T: Send`),
    //   because the unique reference allows moving/replacing the pointee.
    #[stable(feature = "rust1", since = "1.0.0")]
    unsafe impl<T: Sync + ?Sized> Send for &T {}
    #[stable(feature = "rust1", since = "1.0.0")]
    unsafe impl<T: Send + ?Sized> Send for &mut T {}
}
+
+/// Compiler-internal trait used to indicate the type of enum discriminants.
+///
+/// This trait is automatically implemented for every type and does not add any
+/// guarantees to [`mem::Discriminant`]. It is **undefined behavior** to transmute
+/// between `DiscriminantKind::Discriminant` and `mem::Discriminant`.
+///
+/// [`mem::Discriminant`]: crate::mem::Discriminant
#[unstable(
    feature = "discriminant_kind",
    issue = "none",
    reason = "this trait is unlikely to ever be stabilized, use `mem::discriminant` instead"
)]
#[lang = "discriminant_kind"]
pub trait DiscriminantKind {
    /// The type of the discriminant, which must satisfy the trait
    /// bounds required by `mem::Discriminant`.
    // There are no source-level impls of this trait: the compiler supplies
    // the implementation for every type via the lang item above.
    #[lang = "discriminant_type"]
    type Discriminant: Clone + Copy + Debug + Eq + PartialEq + Hash + Send + Sync + Unpin;
}
+
+/// Compiler-internal trait used to determine whether a type contains
+/// any `UnsafeCell` internally, but not through an indirection.
+/// This affects, for example, whether a `static` of that type is
+/// placed in read-only static memory or writable static memory.
#[lang = "freeze"]
pub(crate) unsafe auto trait Freeze {}

// `UnsafeCell` is the sole primitive source of interior mutability.
impl<T: ?Sized> !Freeze for UnsafeCell<T> {}
// Zero-sized and indirection-only types never contain an `UnsafeCell`
// *inline*, so they are unconditionally `Freeze` even when `T` is not.
unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
unsafe impl<T: ?Sized> Freeze for *const T {}
unsafe impl<T: ?Sized> Freeze for *mut T {}
unsafe impl<T: ?Sized> Freeze for &T {}
unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+/// Types that can be safely moved after being pinned.
+///
+/// Rust itself has no notion of immovable types, and considers moves (e.g.,
+/// through assignment or [`mem::replace`]) to always be safe.
+///
+/// The [`Pin`][Pin] type is used instead to prevent moves through the type
+/// system. Pointers `P<T>` wrapped in the [`Pin<P<T>>`][Pin] wrapper can't be
+/// moved out of. See the [`pin` module] documentation for more information on
+/// pinning.
+///
+/// Implementing the `Unpin` trait for `T` lifts the restrictions of pinning off
+/// the type, which then allows moving `T` out of [`Pin<P<T>>`][Pin] with
+/// functions such as [`mem::replace`].
+///
+/// `Unpin` has no consequence at all for non-pinned data. In particular,
+/// [`mem::replace`] happily moves `!Unpin` data (it works for any `&mut T`, not
+/// just when `T: Unpin`). However, you cannot use [`mem::replace`] on data
+/// wrapped inside a [`Pin<P<T>>`][Pin] because you cannot get the `&mut T` you
+/// need for that, and *that* is what makes this system work.
+///
+/// So this, for example, can only be done on types implementing `Unpin`:
+///
+/// ```rust
+/// # #![allow(unused_must_use)]
+/// use std::mem;
+/// use std::pin::Pin;
+///
+/// let mut string = "this".to_string();
+/// let mut pinned_string = Pin::new(&mut string);
+///
+/// // We need a mutable reference to call `mem::replace`.
+/// // We can obtain such a reference by (implicitly) invoking `Pin::deref_mut`,
+/// // but that is only possible because `String` implements `Unpin`.
+/// mem::replace(&mut *pinned_string, "other".to_string());
+/// ```
+///
+/// This trait is automatically implemented for almost every type.
+///
+/// [`mem::replace`]: crate::mem::replace
+/// [Pin]: crate::pin::Pin
+/// [`pin` module]: crate::pin
#[stable(feature = "pin", since = "1.33.0")]
#[rustc_on_unimplemented(
    on(_Self = "std::future::Future", note = "consider using `Box::pin`",),
    message = "`{Self}` cannot be unpinned"
)]
#[lang = "unpin"]
// Auto trait: implemented for every type except those that (transitively)
// contain a field opting out, such as `PhantomPinned` below.
pub auto trait Unpin {}
+
+/// A marker type which does not implement `Unpin`.
+///
+/// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default.
#[stable(feature = "pin", since = "1.33.0")]
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct PhantomPinned;

#[stable(feature = "pin", since = "1.33.0")]
impl !Unpin for PhantomPinned {}

// References are `Unpin` regardless of the pointee: moving a reference never
// moves the value it points to.
#[stable(feature = "pin", since = "1.33.0")]
impl<'a, T: ?Sized + 'a> Unpin for &'a T {}

#[stable(feature = "pin", since = "1.33.0")]
impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {}

// Likewise for raw pointers.
#[stable(feature = "pin_raw", since = "1.38.0")]
impl<T: ?Sized> Unpin for *const T {}

#[stable(feature = "pin_raw", since = "1.38.0")]
impl<T: ?Sized> Unpin for *mut T {}
+
+/// Implementations of `Copy` for primitive types.
+///
+/// Implementations that cannot be described in Rust
+/// are implemented in `traits::SelectionContext::copy_clone_conditions()`
+/// in `rustc_trait_selection`.
mod copy_impls {

    use super::Copy;

    // Expands to a stable `impl Copy` for each listed primitive type.
    macro_rules! impl_copy {
        ($($t:ty)*) => {
            $(
                #[stable(feature = "rust1", since = "1.0.0")]
                impl Copy for $t {}
            )*
        }
    }

    impl_copy! {
        usize u8 u16 u32 u64 u128
        isize i8 i16 i32 i64 i128
        f32 f64
        bool char
    }

    #[unstable(feature = "never_type", issue = "35121")]
    impl Copy for ! {}

    // Copying a raw pointer duplicates only the address; it claims no
    // ownership of the pointee, so this is unconditionally sound.
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<T: ?Sized> Copy for *const T {}

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<T: ?Sized> Copy for *mut T {}

    /// Shared references can be copied, but mutable references *cannot*!
    #[stable(feature = "rust1", since = "1.0.0")]
    impl<T: ?Sized> Copy for &T {}
}
--- /dev/null
+use crate::ops::{Deref, DerefMut};
+use crate::ptr;
+
/// A wrapper to inhibit the compiler from automatically calling `T`’s destructor.
+/// This wrapper is 0-cost.
+///
+/// `ManuallyDrop<T>` is subject to the same layout optimizations as `T`.
+/// As a consequence, it has *no effect* on the assumptions that the compiler makes
+/// about its contents. For example, initializing a `ManuallyDrop<&mut T>`
+/// with [`mem::zeroed`] is undefined behavior.
+/// If you need to handle uninitialized data, use [`MaybeUninit<T>`] instead.
+///
+/// Note that accessing the value inside a `ManuallyDrop<T>` is safe.
+/// This means that a `ManuallyDrop<T>` whose content has been dropped must not
+/// be exposed through a public safe API.
+/// Correspondingly, `ManuallyDrop::drop` is unsafe.
+///
+/// # `ManuallyDrop` and drop order.
+///
+/// Rust has a well-defined [drop order] of values. To make sure that fields or
+/// locals are dropped in a specific order, reorder the declarations such that
+/// the implicit drop order is the correct one.
+///
+/// It is possible to use `ManuallyDrop` to control the drop order, but this
+/// requires unsafe code and is hard to do correctly in the presence of
+/// unwinding.
+///
+/// For example, if you want to make sure that a specific field is dropped after
+/// the others, make it the last field of a struct:
+///
+/// ```
+/// struct Context;
+///
+/// struct Widget {
+/// children: Vec<Widget>,
+/// // `context` will be dropped after `children`.
+/// // Rust guarantees that fields are dropped in the order of declaration.
+/// context: Context,
+/// }
+/// ```
+///
+/// [drop order]: https://doc.rust-lang.org/reference/destructors.html
+/// [`mem::zeroed`]: crate::mem::zeroed
+/// [`MaybeUninit<T>`]: crate::mem::MaybeUninit
#[stable(feature = "manually_drop", since = "1.20.0")]
#[lang = "manually_drop"]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct ManuallyDrop<T: ?Sized> {
    // Always a valid `T` as far as the compiler is concerned; only the
    // implicit drop glue is suppressed (via the lang item above).
    value: T,
}
+
impl<T> ManuallyDrop<T> {
    /// Wrap a value to be manually dropped.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// let mut x = ManuallyDrop::new(String::from("Hello World!"));
    /// x.truncate(5); // You can still safely operate on the value
    /// assert_eq!(*x, "Hello");
    /// // But `Drop` will not be run here
    /// ```
    #[must_use = "if you don't need the wrapper, you can use `mem::forget` instead"]
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[rustc_const_stable(feature = "const_manually_drop", since = "1.36.0")]
    #[inline(always)]
    pub const fn new(value: T) -> ManuallyDrop<T> {
        // Moving the value in is all that is needed; the wrapper type itself
        // suppresses the drop glue.
        ManuallyDrop { value }
    }

    /// Extracts the value from the `ManuallyDrop` container.
    ///
    /// This allows the value to be dropped again.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::mem::ManuallyDrop;
    /// let x = ManuallyDrop::new(Box::new(()));
    /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`.
    /// ```
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[rustc_const_stable(feature = "const_manually_drop", since = "1.36.0")]
    #[inline(always)]
    pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
        // Moving the field out returns ownership to the caller, so normal
        // drop semantics apply to the value again.
        slot.value
    }

    /// Takes the value from the `ManuallyDrop<T>` container out.
    ///
    /// This method is primarily intended for moving out values in drop.
    /// Instead of using [`ManuallyDrop::drop`] to manually drop the value,
    /// you can use this method to take the value and use it however desired.
    ///
    /// Whenever possible, it is preferable to use [`into_inner`][`ManuallyDrop::into_inner`]
    /// instead, which prevents duplicating the content of the `ManuallyDrop<T>`.
    ///
    /// # Safety
    ///
    /// This function semantically moves out the contained value without preventing further usage,
    /// leaving the state of this container unchanged.
    /// It is your responsibility to ensure that this `ManuallyDrop` is not used again.
    ///
    #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"]
    #[stable(feature = "manually_drop_take", since = "1.42.0")]
    #[inline]
    pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
        // SAFETY: we are reading from a reference, which is guaranteed
        // to be valid for reads. (The bitwise duplication this creates is
        // exactly why the caller must not use `slot` afterwards.)
        unsafe { ptr::read(&slot.value) }
    }
}
+
impl<T: ?Sized> ManuallyDrop<T> {
    /// Manually drops the contained value. This is exactly equivalent to calling
    /// [`ptr::drop_in_place`] with a pointer to the contained value. As such, unless
    /// the contained value is a packed struct, the destructor will be called in-place
    /// without moving the value, and thus can be used to safely drop [pinned] data.
    ///
    /// If you have ownership of the value, you can use [`ManuallyDrop::into_inner`] instead.
    ///
    /// # Safety
    ///
    /// This function runs the destructor of the contained value. Other than changes made by
    /// the destructor itself, the memory is left unchanged, and so as far as the compiler is
    /// concerned still holds a bit-pattern which is valid for the type `T`.
    ///
    /// However, this "zombie" value should not be exposed to safe code, and this function
    /// should not be called more than once. To use a value after it's been dropped, or drop
    /// a value multiple times, can cause Undefined Behavior (depending on what `drop` does).
    /// This is normally prevented by the type system, but users of `ManuallyDrop` must
    /// uphold those guarantees without assistance from the compiler.
    ///
    /// [pinned]: crate::pin
    // NOTE: this is in a separate `T: ?Sized` impl (unlike `take`/`into_inner`)
    // because dropping in place works through a reference and never needs to
    // move the value, so it supports unsized `T` too.
    #[stable(feature = "manually_drop", since = "1.20.0")]
    #[inline]
    pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
        // SAFETY: we are dropping the value pointed to by a mutable reference
        // which is guaranteed to be valid for writes.
        // It is up to the caller to make sure that `slot` isn't dropped again.
        unsafe { ptr::drop_in_place(&mut slot.value) }
    }
}
+
#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> Deref for ManuallyDrop<T> {
    type Target = T;
    // Accessing the wrapped value is always safe; only its drop is inhibited.
    #[inline(always)]
    fn deref(&self) -> &T {
        &self.value
    }
}
+
#[stable(feature = "manually_drop", since = "1.20.0")]
impl<T: ?Sized> DerefMut for ManuallyDrop<T> {
    // Mutable access is likewise safe; it cannot cause a double drop.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
--- /dev/null
+use crate::any::type_name;
+use crate::fmt;
+use crate::intrinsics;
+use crate::mem::ManuallyDrop;
+use crate::ptr;
+
+/// A wrapper type to construct uninitialized instances of `T`.
+///
+/// # Initialization invariant
+///
+/// The compiler, in general, assumes that a variable is properly initialized
+/// according to the requirements of the variable's type. For example, a variable of
+/// reference type must be aligned and non-NULL. This is an invariant that must
+/// *always* be upheld, even in unsafe code. As a consequence, zero-initializing a
+/// variable of reference type causes instantaneous [undefined behavior][ub],
+/// no matter whether that reference ever gets used to access memory:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<&i32>`:
+/// let x: &i32 = unsafe { MaybeUninit::zeroed().assume_init() }; // undefined behavior! ⚠️
+/// ```
+///
+/// This is exploited by the compiler for various optimizations, such as eliding
+/// run-time checks and optimizing `enum` layout.
+///
+/// Similarly, entirely uninitialized memory may have any content, while a `bool` must
+/// always be `true` or `false`. Hence, creating an uninitialized `bool` is undefined behavior:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let b: bool = unsafe { mem::uninitialized() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<bool>`:
+/// let b: bool = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! ⚠️
+/// ```
+///
+/// Moreover, uninitialized memory is special in that the compiler knows that
+/// it does not have a fixed value. This makes it undefined behavior to have
+/// uninitialized data in a variable even if that variable has an integer type,
+/// which otherwise can hold any *fixed* bit pattern:
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem::{self, MaybeUninit};
+///
+/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior! ⚠️
+/// // The equivalent code with `MaybeUninit<i32>`:
+/// let x: i32 = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! ⚠️
+/// ```
+/// (Notice that the rules around uninitialized integers are not finalized yet, but
+/// until they are, it is advisable to avoid them.)
+///
+/// On top of that, remember that most types have additional invariants beyond merely
+/// being considered initialized at the type level. For example, a `1`-initialized [`Vec<T>`]
+/// is considered initialized (under the current implementation; this does not constitute
+/// a stable guarantee) because the only requirement the compiler knows about it
+/// is that the data pointer must be non-null. Creating such a `Vec<T>` does not cause
+/// *immediate* undefined behavior, but will cause undefined behavior with most
+/// safe operations (including dropping it).
+///
+/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
+///
+/// # Examples
+///
+/// `MaybeUninit<T>` serves to enable unsafe code to deal with uninitialized data.
+/// It is a signal to the compiler indicating that the data here might *not*
+/// be initialized:
+///
+/// ```rust
+/// use std::mem::MaybeUninit;
+///
+/// // Create an explicitly uninitialized reference. The compiler knows that data inside
+/// // a `MaybeUninit<T>` may be invalid, and hence this is not UB:
+/// let mut x = MaybeUninit::<&i32>::uninit();
+/// // Set it to a valid value.
+/// unsafe { x.as_mut_ptr().write(&0); }
+/// // Extract the initialized data -- this is only allowed *after* properly
+/// // initializing `x`!
+/// let x = unsafe { x.assume_init() };
+/// ```
+///
+/// The compiler then knows to not make any incorrect assumptions or optimizations on this code.
+///
+/// You can think of `MaybeUninit<T>` as being a bit like `Option<T>` but without
+/// any of the run-time tracking and without any of the safety checks.
+///
+/// ## out-pointers
+///
+/// You can use `MaybeUninit<T>` to implement "out-pointers": instead of returning data
+/// from a function, pass it a pointer to some (uninitialized) memory to put the
+/// result into. This can be useful when it is important for the caller to control
+/// how the memory the result is stored in gets allocated, and you want to avoid
+/// unnecessary moves.
+///
+/// ```
+/// use std::mem::MaybeUninit;
+///
+/// unsafe fn make_vec(out: *mut Vec<i32>) {
+/// // `write` does not drop the old contents, which is important.
+/// out.write(vec![1, 2, 3]);
+/// }
+///
+/// let mut v = MaybeUninit::uninit();
+/// unsafe { make_vec(v.as_mut_ptr()); }
+/// // Now we know `v` is initialized! This also makes sure the vector gets
+/// // properly dropped.
+/// let v = unsafe { v.assume_init() };
+/// assert_eq!(&v, &[1, 2, 3]);
+/// ```
+///
+/// ## Initializing an array element-by-element
+///
+/// `MaybeUninit<T>` can be used to initialize a large array element-by-element:
+///
+/// ```
+/// use std::mem::{self, MaybeUninit};
+///
+/// let data = {
+/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
+/// // safe because the type we are claiming to have initialized here is a
+/// // bunch of `MaybeUninit`s, which do not require initialization.
+/// let mut data: [MaybeUninit<Vec<u32>>; 1000] = unsafe {
+/// MaybeUninit::uninit().assume_init()
+/// };
+///
+/// // Dropping a `MaybeUninit` does nothing. Thus using raw pointer
+/// // assignment instead of `ptr::write` does not cause the old
+/// // uninitialized value to be dropped. Also if there is a panic during
+/// // this loop, we have a memory leak, but there is no memory safety
+/// // issue.
+/// for elem in &mut data[..] {
+/// *elem = MaybeUninit::new(vec![42]);
+/// }
+///
+/// // Everything is initialized. Transmute the array to the
+/// // initialized type.
+/// unsafe { mem::transmute::<_, [Vec<u32>; 1000]>(data) }
+/// };
+///
+/// assert_eq!(&data[0], &[42]);
+/// ```
+///
+/// You can also work with partially initialized arrays, which could
+/// be found in low-level datastructures.
+///
+/// ```
+/// use std::mem::MaybeUninit;
+/// use std::ptr;
+///
+/// // Create an uninitialized array of `MaybeUninit`. The `assume_init` is
+/// // safe because the type we are claiming to have initialized here is a
+/// // bunch of `MaybeUninit`s, which do not require initialization.
+/// let mut data: [MaybeUninit<String>; 1000] = unsafe { MaybeUninit::uninit().assume_init() };
+/// // Count the number of elements we have assigned.
+/// let mut data_len: usize = 0;
+///
+/// for elem in &mut data[0..500] {
+/// *elem = MaybeUninit::new(String::from("hello"));
+/// data_len += 1;
+/// }
+///
+/// // For each item in the array, drop if we allocated it.
+/// for elem in &mut data[0..data_len] {
+/// unsafe { ptr::drop_in_place(elem.as_mut_ptr()); }
+/// }
+/// ```
+///
+/// ## Initializing a struct field-by-field
+///
+/// There is currently no supported way to create a raw pointer or reference
+/// to a field of a struct inside `MaybeUninit<Struct>`. That means it is not possible
+/// to create a struct by calling `MaybeUninit::uninit::<Struct>()` and then writing
+/// to its fields.
+///
+/// [ub]: ../../reference/behavior-considered-undefined.html
+///
+/// # Layout
+///
+/// `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as `T`:
+///
+/// ```rust
+/// use std::mem::{MaybeUninit, size_of, align_of};
+/// assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
+/// assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());
+/// ```
+///
/// However remember that a type *containing* a `MaybeUninit<T>` does not necessarily have the
/// same layout; Rust does not in general guarantee that the fields of a `Foo<T>` have the same order as
+/// a `Foo<U>` even if `T` and `U` have the same size and alignment. Furthermore because any bit
+/// value is valid for a `MaybeUninit<T>` the compiler can't apply non-zero/niche-filling
+/// optimizations, potentially resulting in a larger size:
+///
+/// ```rust
+/// # use std::mem::{MaybeUninit, size_of};
+/// assert_eq!(size_of::<Option<bool>>(), 1);
+/// assert_eq!(size_of::<Option<MaybeUninit<bool>>>(), 2);
+/// ```
+///
+/// If `T` is FFI-safe, then so is `MaybeUninit<T>`.
+///
+/// While `MaybeUninit` is `#[repr(transparent)]` (indicating it guarantees the same size,
+/// alignment, and ABI as `T`), this does *not* change any of the previous caveats. `Option<T>` and
+/// `Option<MaybeUninit<T>>` may still have different sizes, and types containing a field of type
+/// `T` may be laid out (and sized) differently than if that field were `MaybeUninit<T>`.
+/// `MaybeUninit` is a union type, and `#[repr(transparent)]` on unions is unstable (see [the
+/// tracking issue](https://github.com/rust-lang/rust/issues/60405)). Over time, the exact
+/// guarantees of `#[repr(transparent)]` on unions may evolve, and `MaybeUninit` may or may not
+/// remain `#[repr(transparent)]`. That said, `MaybeUninit<T>` will *always* guarantee that it has
+/// the same size, alignment, and ABI as `T`; it's just that the way `MaybeUninit` implements that
+/// guarantee may evolve.
#[stable(feature = "maybe_uninit", since = "1.36.0")]
// Lang item so we can wrap other types in it. This is useful for generators.
#[lang = "maybe_uninit"]
#[derive(Copy)]
#[repr(transparent)]
pub union MaybeUninit<T> {
    // The `uninit` variant allows constructing a value without writing any
    // payload bytes; `ManuallyDrop` suppresses the drop glue that a union
    // field of type `T` would otherwise require.
    uninit: (),
    value: ManuallyDrop<T>,
}
+
#[stable(feature = "maybe_uninit", since = "1.36.0")]
impl<T: Copy> Clone for MaybeUninit<T> {
    #[inline(always)]
    fn clone(&self) -> Self {
        // Not calling `T::clone()`, we cannot know if we are initialized enough for that.
        // `T: Copy` makes the bitwise copy sound even when the contents are uninitialized.
        *self
    }
}
+
#[stable(feature = "maybe_uninit_debug", since = "1.41.0")]
impl<T> fmt::Debug for MaybeUninit<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The contents may be uninitialized, so we can only ever print the type
        // name, never the (potentially invalid) value. `pad` honors width/alignment
        // flags on the formatter.
        f.pad(type_name::<Self>())
    }
}
+
impl<T> MaybeUninit<T> {
    /// Creates a new `MaybeUninit<T>` initialized with the given value.
    /// It is safe to call [`assume_init`] on the return value of this function.
    ///
    /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    ///
    /// # Example
    ///
    /// ```
    /// use std::mem::MaybeUninit;
    ///
    /// let v: MaybeUninit<Vec<u8>> = MaybeUninit::new(vec![42]);
    /// ```
    ///
    /// [`assume_init`]: MaybeUninit::assume_init
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[rustc_const_stable(feature = "const_maybe_uninit", since = "1.36.0")]
    #[inline(always)]
    pub const fn new(val: T) -> MaybeUninit<T> {
        // `ManuallyDrop` suppresses `T`'s destructor; the caller is responsible
        // for eventually dropping the value (e.g. via `assume_init`).
        MaybeUninit { value: ManuallyDrop::new(val) }
    }

    /// Creates a new `MaybeUninit<T>` in an uninitialized state.
    ///
    /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    ///
    /// See the [type-level documentation][MaybeUninit] for some examples.
    ///
    /// # Example
    ///
    /// ```
    /// use std::mem::MaybeUninit;
    ///
    /// let v: MaybeUninit<String> = MaybeUninit::uninit();
    /// ```
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[rustc_const_stable(feature = "const_maybe_uninit", since = "1.36.0")]
    #[inline(always)]
    #[rustc_diagnostic_item = "maybe_uninit_uninit"]
    pub const fn uninit() -> MaybeUninit<T> {
        // The `uninit` field is a ZST, so this initializes no bytes of the storage.
        MaybeUninit { uninit: () }
    }

    /// Create a new array of `MaybeUninit<T>` items, in an uninitialized state.
    ///
    /// Note: in a future Rust version this method may become unnecessary
    /// when array literal syntax allows
    /// [repeating const expressions](https://github.com/rust-lang/rust/issues/49147).
    /// The example below could then use `let mut buf = [MaybeUninit::<u8>::uninit(); 32];`.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// #![feature(maybe_uninit_uninit_array, maybe_uninit_extra, maybe_uninit_slice)]
    ///
    /// use std::mem::MaybeUninit;
    ///
    /// extern "C" {
    ///     fn read_into_buffer(ptr: *mut u8, max_len: usize) -> usize;
    /// }
    ///
    /// /// Returns a (possibly smaller) slice of data that was actually read
    /// fn read(buf: &mut [MaybeUninit<u8>]) -> &[u8] {
    ///     unsafe {
    ///         let len = read_into_buffer(buf.as_mut_ptr() as *mut u8, buf.len());
    ///         MaybeUninit::slice_assume_init_ref(&buf[..len])
    ///     }
    /// }
    ///
    /// let mut buf: [MaybeUninit<u8>; 32] = MaybeUninit::uninit_array();
    /// let data = read(&mut buf);
    /// ```
    #[unstable(feature = "maybe_uninit_uninit_array", issue = "none")]
    #[inline(always)]
    pub fn uninit_array<const LEN: usize>() -> [Self; LEN] {
        // SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid.
        // (Every element is itself allowed to be uninitialized.)
        unsafe { MaybeUninit::<[MaybeUninit<T>; LEN]>::uninit().assume_init() }
    }

    /// Creates a new `MaybeUninit<T>` in an uninitialized state, with the memory being
    /// filled with `0` bytes. It depends on `T` whether that already makes for
    /// proper initialization. For example, `MaybeUninit<usize>::zeroed()` is initialized,
    /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not
    /// be null.
    ///
    /// Note that dropping a `MaybeUninit<T>` will never call `T`'s drop code.
    /// It is your responsibility to make sure `T` gets dropped if it got initialized.
    ///
    /// # Example
    ///
    /// Correct usage of this function: initializing a struct with zero, where all
    /// fields of the struct can hold the bit-pattern 0 as a valid value.
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// let x = MaybeUninit::<(u8, bool)>::zeroed();
    /// let x = unsafe { x.assume_init() };
    /// assert_eq!(x, (0, false));
    /// ```
    ///
    /// *Incorrect* usage of this function: calling `x.zeroed().assume_init()`
    /// when `0` is not a valid bit-pattern for the type:
    ///
    /// ```rust,no_run
    /// use std::mem::MaybeUninit;
    ///
    /// enum NotZero { One = 1, Two = 2 }
    ///
    /// let x = MaybeUninit::<(u8, NotZero)>::zeroed();
    /// let x = unsafe { x.assume_init() };
    /// // Inside a pair, we create a `NotZero` that does not have a valid discriminant.
    /// // This is undefined behavior. ⚠️
    /// ```
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[inline]
    #[rustc_diagnostic_item = "maybe_uninit_zeroed"]
    pub fn zeroed() -> MaybeUninit<T> {
        let mut u = MaybeUninit::<T>::uninit();
        // SAFETY: `u.as_mut_ptr()` points to allocated memory.
        // Writing `size_of::<T>()` zero bytes (count of 1 element) is always in-bounds.
        unsafe {
            u.as_mut_ptr().write_bytes(0u8, 1);
        }
        u
    }

    /// Sets the value of the `MaybeUninit<T>`. This overwrites any previous value
    /// without dropping it, so be careful not to use this twice unless you want to
    /// skip running the destructor. For your convenience, this also returns a mutable
    /// reference to the (now safely initialized) contents of `self`.
    #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
    #[inline(always)]
    pub fn write(&mut self, val: T) -> &mut T {
        // Plain assignment: the old contents are overwritten without running any destructor.
        *self = MaybeUninit::new(val);
        // SAFETY: We just initialized this value.
        unsafe { self.assume_init_mut() }
    }

    /// Gets a pointer to the contained value. Reading from this pointer or turning it
    /// into a reference is undefined behavior unless the `MaybeUninit<T>` is initialized.
    /// Writing to memory that this pointer (non-transitively) points to is undefined behavior
    /// (except inside an `UnsafeCell<T>`).
    ///
    /// # Examples
    ///
    /// Correct usage of this method:
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
    /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); }
    /// // Create a reference into the `MaybeUninit<T>`. This is okay because we initialized it.
    /// let x_vec = unsafe { &*x.as_ptr() };
    /// assert_eq!(x_vec.len(), 3);
    /// ```
    ///
    /// *Incorrect* usage of this method:
    ///
    /// ```rust,no_run
    /// use std::mem::MaybeUninit;
    ///
    /// let x = MaybeUninit::<Vec<u32>>::uninit();
    /// let x_vec = unsafe { &*x.as_ptr() };
    /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
    /// ```
    ///
    /// (Notice that the rules around references to uninitialized data are not finalized yet, but
    /// until they are, it is advisable to avoid them.)
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[rustc_const_unstable(feature = "const_maybe_uninit_as_ptr", issue = "75251")]
    #[inline(always)]
    pub const fn as_ptr(&self) -> *const T {
        // `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
        self as *const _ as *const T
    }

    /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it
    /// into a reference is undefined behavior unless the `MaybeUninit<T>` is initialized.
    ///
    /// # Examples
    ///
    /// Correct usage of this method:
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
    /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); }
    /// // Create a reference into the `MaybeUninit<Vec<u32>>`.
    /// // This is okay because we initialized it.
    /// let x_vec = unsafe { &mut *x.as_mut_ptr() };
    /// x_vec.push(3);
    /// assert_eq!(x_vec.len(), 4);
    /// ```
    ///
    /// *Incorrect* usage of this method:
    ///
    /// ```rust,no_run
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
    /// let x_vec = unsafe { &mut *x.as_mut_ptr() };
    /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
    /// ```
    ///
    /// (Notice that the rules around references to uninitialized data are not finalized yet, but
    /// until they are, it is advisable to avoid them.)
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[rustc_const_unstable(feature = "const_maybe_uninit_as_ptr", issue = "75251")]
    #[inline(always)]
    pub const fn as_mut_ptr(&mut self) -> *mut T {
        // `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
        self as *mut _ as *mut T
    }

    /// Extracts the value from the `MaybeUninit<T>` container. This is a great way
    /// to ensure that the data will get dropped, because the resulting `T` is
    /// subject to the usual drop handling.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes immediate undefined
    /// behavior. The [type-level documentation][inv] contains more information about
    /// this initialization invariant.
    ///
    /// [inv]: #initialization-invariant
    ///
    /// On top of that, remember that most types have additional invariants beyond merely
    /// being considered initialized at the type level. For example, a `1`-initialized [`Vec<T>`]
    /// is considered initialized (under the current implementation; this does not constitute
    /// a stable guarantee) because the only requirement the compiler knows about it
    /// is that the data pointer must be non-null. Creating such a `Vec<T>` does not cause
    /// *immediate* undefined behavior, but will cause undefined behavior with most
    /// safe operations (including dropping it).
    ///
    /// [`Vec<T>`]: ../../std/vec/struct.Vec.html
    ///
    /// # Examples
    ///
    /// Correct usage of this method:
    ///
    /// ```rust
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<bool>::uninit();
    /// unsafe { x.as_mut_ptr().write(true); }
    /// let x_init = unsafe { x.assume_init() };
    /// assert_eq!(x_init, true);
    /// ```
    ///
    /// *Incorrect* usage of this method:
    ///
    /// ```rust,no_run
    /// use std::mem::MaybeUninit;
    ///
    /// let x = MaybeUninit::<Vec<u32>>::uninit();
    /// let x_init = unsafe { x.assume_init() };
    /// // `x` had not been initialized yet, so this last line caused undefined behavior. ⚠️
    /// ```
    #[stable(feature = "maybe_uninit", since = "1.36.0")]
    #[inline(always)]
    #[rustc_diagnostic_item = "assume_init"]
    pub unsafe fn assume_init(self) -> T {
        // SAFETY: the caller must guarantee that `self` is initialized.
        // This also means that `self` must be a `value` variant.
        unsafe {
            // An initialized value also proves `T` is inhabited; assert it so an
            // uninhabited `T` panics here instead of producing an invalid value.
            intrinsics::assert_inhabited::<T>();
            ManuallyDrop::into_inner(self.value)
        }
    }

    /// Reads the value from the `MaybeUninit<T>` container. The resulting `T` is subject
    /// to the usual drop handling.
    ///
    /// Whenever possible, it is preferable to use [`assume_init`] instead, which
    /// prevents duplicating the content of the `MaybeUninit<T>`.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is in an initialized
    /// state. Calling this when the content is not yet fully initialized causes undefined
    /// behavior. The [type-level documentation][inv] contains more information about
    /// this initialization invariant.
    ///
    /// Moreover, this leaves a copy of the same data behind in the `MaybeUninit<T>`. When using
    /// multiple copies of the data (by calling `assume_init_read` multiple times, or first
    /// calling `assume_init_read` and then [`assume_init`]), it is your responsibility
    /// to ensure that that data may indeed be duplicated.
    ///
    /// [inv]: #initialization-invariant
    /// [`assume_init`]: MaybeUninit::assume_init
    ///
    /// # Examples
    ///
    /// Correct usage of this method:
    ///
    /// ```rust
    /// #![feature(maybe_uninit_extra)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<u32>::uninit();
    /// x.write(13);
    /// let x1 = unsafe { x.assume_init_read() };
    /// // `u32` is `Copy`, so we may read multiple times.
    /// let x2 = unsafe { x.assume_init_read() };
    /// assert_eq!(x1, x2);
    ///
    /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninit();
    /// x.write(None);
    /// let x1 = unsafe { x.assume_init_read() };
    /// // Duplicating a `None` value is okay, so we may read multiple times.
    /// let x2 = unsafe { x.assume_init_read() };
    /// assert_eq!(x1, x2);
    /// ```
    ///
    /// *Incorrect* usage of this method:
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_extra)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Option<Vec<u32>>>::uninit();
    /// x.write(Some(vec![0,1,2]));
    /// let x1 = unsafe { x.assume_init_read() };
    /// let x2 = unsafe { x.assume_init_read() };
    /// // We now created two copies of the same vector, leading to a double-free ⚠️ when
    /// // they both get dropped!
    /// ```
    #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
    #[inline(always)]
    pub unsafe fn assume_init_read(&self) -> T {
        // SAFETY: the caller must guarantee that `self` is initialized.
        // Reading from `self.as_ptr()` is safe since `self` should be initialized.
        // Note this performs a bitwise copy and leaves `self` untouched.
        unsafe {
            intrinsics::assert_inhabited::<T>();
            self.as_ptr().read()
        }
    }

    /// Drops the contained value in place.
    ///
    /// If you have ownership of the `MaybeUninit`, you can use [`assume_init`] instead.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit<T>` really is
    /// in an initialized state. Calling this when the content is not yet fully
    /// initialized causes undefined behavior.
    ///
    /// On top of that, all additional invariants of the type `T` must be
    /// satisfied, as the `Drop` implementation of `T` (or its members) may
    /// rely on this. For example, a `1`-initialized [`Vec<T>`] is considered
    /// initialized (under the current implementation; this does not constitute
    /// a stable guarantee) because the only requirement the compiler knows
    /// about it is that the data pointer must be non-null. Dropping such a
    /// `Vec<T>` however will cause undefined behavior.
    ///
    /// [`assume_init`]: MaybeUninit::assume_init
    /// [`Vec<T>`]: ../../std/vec/struct.Vec.html
    #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
    pub unsafe fn assume_init_drop(&mut self) {
        // SAFETY: the caller must guarantee that `self` is initialized and
        // satisfies all invariants of `T`.
        // Dropping the value in place is safe if that is the case.
        // Afterwards the contents must be considered uninitialized again.
        unsafe { ptr::drop_in_place(self.as_mut_ptr()) }
    }

    /// Gets a shared reference to the contained value.
    ///
    /// This can be useful when we want to access a `MaybeUninit` that has been
    /// initialized but don't have ownership of the `MaybeUninit` (preventing the use
    /// of `.assume_init()`).
    ///
    /// # Safety
    ///
    /// Calling this when the content is not yet fully initialized causes undefined
    /// behavior: it is up to the caller to guarantee that the `MaybeUninit<T>` really
    /// is in an initialized state.
    ///
    /// # Examples
    ///
    /// ### Correct usage of this method:
    ///
    /// ```rust
    /// #![feature(maybe_uninit_ref)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut x = MaybeUninit::<Vec<u32>>::uninit();
    /// // Initialize `x`:
    /// unsafe { x.as_mut_ptr().write(vec![1, 2, 3]); }
    /// // Now that our `MaybeUninit<_>` is known to be initialized, it is okay to
    /// // create a shared reference to it:
    /// let x: &Vec<u32> = unsafe {
    ///     // SAFETY: `x` has been initialized.
    ///     x.assume_init_ref()
    /// };
    /// assert_eq!(x, &vec![1, 2, 3]);
    /// ```
    ///
    /// ### *Incorrect* usages of this method:
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_ref)]
    /// use std::mem::MaybeUninit;
    ///
    /// let x = MaybeUninit::<Vec<u32>>::uninit();
    /// let x_vec: &Vec<u32> = unsafe { x.assume_init_ref() };
    /// // We have created a reference to an uninitialized vector! This is undefined behavior. ⚠️
    /// ```
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_ref)]
    /// use std::{cell::Cell, mem::MaybeUninit};
    ///
    /// let b = MaybeUninit::<Cell<bool>>::uninit();
    /// // Initialize the `MaybeUninit` using `Cell::set`:
    /// unsafe {
    ///     b.assume_init_ref().set(true);
    ///     // ^^^^^^^^^^^^^^^
    ///     // Reference to an uninitialized `Cell<bool>`: UB!
    /// }
    /// ```
    #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
    #[inline(always)]
    pub unsafe fn assume_init_ref(&self) -> &T {
        // SAFETY: the caller must guarantee that `self` is initialized.
        // This also means that `self` must be a `value` variant.
        unsafe {
            intrinsics::assert_inhabited::<T>();
            // `ManuallyDrop<T>` derefs to `T`, so reborrow through it.
            &*self.value
        }
    }

    /// Gets a mutable (unique) reference to the contained value.
    ///
    /// This can be useful when we want to access a `MaybeUninit` that has been
    /// initialized but don't have ownership of the `MaybeUninit` (preventing the use
    /// of `.assume_init()`).
    ///
    /// # Safety
    ///
    /// Calling this when the content is not yet fully initialized causes undefined
    /// behavior: it is up to the caller to guarantee that the `MaybeUninit<T>` really
    /// is in an initialized state. For instance, `.assume_init_mut()` cannot be used to
    /// initialize a `MaybeUninit`.
    ///
    /// # Examples
    ///
    /// ### Correct usage of this method:
    ///
    /// ```rust
    /// #![feature(maybe_uninit_ref)]
    /// use std::mem::MaybeUninit;
    ///
    /// # unsafe extern "C" fn initialize_buffer(buf: *mut [u8; 2048]) { *buf = [0; 2048] }
    /// # #[cfg(FALSE)]
    /// extern "C" {
    ///     /// Initializes *all* the bytes of the input buffer.
    ///     fn initialize_buffer(buf: *mut [u8; 2048]);
    /// }
    ///
    /// let mut buf = MaybeUninit::<[u8; 2048]>::uninit();
    ///
    /// // Initialize `buf`:
    /// unsafe { initialize_buffer(buf.as_mut_ptr()); }
    /// // Now we know that `buf` has been initialized, so we could `.assume_init()` it.
    /// // However, using `.assume_init()` may trigger a `memcpy` of the 2048 bytes.
    /// // To assert our buffer has been initialized without copying it, we upgrade
    /// // the `&mut MaybeUninit<[u8; 2048]>` to a `&mut [u8; 2048]`:
    /// let buf: &mut [u8; 2048] = unsafe {
    ///     // SAFETY: `buf` has been initialized.
    ///     buf.assume_init_mut()
    /// };
    ///
    /// // Now we can use `buf` as a normal slice:
    /// buf.sort_unstable();
    /// assert!(
    ///     buf.windows(2).all(|pair| pair[0] <= pair[1]),
    ///     "buffer is sorted",
    /// );
    /// ```
    ///
    /// ### *Incorrect* usages of this method:
    ///
    /// You cannot use `.assume_init_mut()` to initialize a value:
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_ref)]
    /// use std::mem::MaybeUninit;
    ///
    /// let mut b = MaybeUninit::<bool>::uninit();
    /// unsafe {
    ///     *b.assume_init_mut() = true;
    ///     // We have created a (mutable) reference to an uninitialized `bool`!
    ///     // This is undefined behavior. ⚠️
    /// }
    /// ```
    ///
    /// For instance, you cannot [`Read`] into an uninitialized buffer:
    ///
    /// [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_ref)]
    /// use std::{io, mem::MaybeUninit};
    ///
    /// fn read_chunk (reader: &'_ mut dyn io::Read) -> io::Result<[u8; 64]>
    /// {
    ///     let mut buffer = MaybeUninit::<[u8; 64]>::uninit();
    ///     reader.read_exact(unsafe { buffer.assume_init_mut() })?;
    ///                             // ^^^^^^^^^^^^^^^^^^^^^^^^
    ///                             // (mutable) reference to uninitialized memory!
    ///                             // This is undefined behavior.
    ///     Ok(unsafe { buffer.assume_init() })
    /// }
    /// ```
    ///
    /// Nor can you use direct field access to do field-by-field gradual initialization:
    ///
    /// ```rust,no_run
    /// #![feature(maybe_uninit_ref)]
    /// use std::{mem::MaybeUninit, ptr};
    ///
    /// struct Foo {
    ///     a: u32,
    ///     b: u8,
    /// }
    ///
    /// let foo: Foo = unsafe {
    ///     let mut foo = MaybeUninit::<Foo>::uninit();
    ///     ptr::write(&mut foo.assume_init_mut().a as *mut u32, 1337);
    ///              // ^^^^^^^^^^^^^^^^^^^^^
    ///              // (mutable) reference to uninitialized memory!
    ///              // This is undefined behavior.
    ///     ptr::write(&mut foo.assume_init_mut().b as *mut u8, 42);
    ///              // ^^^^^^^^^^^^^^^^^^^^^
    ///              // (mutable) reference to uninitialized memory!
    ///              // This is undefined behavior.
    ///     foo.assume_init()
    /// };
    /// ```
    // FIXME(#76092): We currently rely on the above being incorrect, i.e., we have references
    // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make
    // a final decision about the rules before stabilization.
    #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
    #[inline(always)]
    pub unsafe fn assume_init_mut(&mut self) -> &mut T {
        // SAFETY: the caller must guarantee that `self` is initialized.
        // This also means that `self` must be a `value` variant.
        unsafe {
            intrinsics::assert_inhabited::<T>();
            &mut *self.value
        }
    }

    /// Assuming all the elements are initialized, get a slice to them.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit<T>` elements
    /// really are in an initialized state.
    /// Calling this when the content is not yet fully initialized causes undefined behavior.
    ///
    /// See [`assume_init_ref`] for more details and examples.
    ///
    /// [`assume_init_ref`]: MaybeUninit::assume_init_ref
    #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
    #[inline(always)]
    pub unsafe fn slice_assume_init_ref(slice: &[Self]) -> &[T] {
        // SAFETY: casting slice to a `*const [T]` is safe since the caller guarantees that
        // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
        // The pointer obtained is valid since it refers to memory owned by `slice` which is a
        // reference and thus guaranteed to be valid for reads.
        unsafe { &*(slice as *const [Self] as *const [T]) }
    }

    /// Assuming all the elements are initialized, get a mutable slice to them.
    ///
    /// # Safety
    ///
    /// It is up to the caller to guarantee that the `MaybeUninit<T>` elements
    /// really are in an initialized state.
    /// Calling this when the content is not yet fully initialized causes undefined behavior.
    ///
    /// See [`assume_init_mut`] for more details and examples.
    ///
    /// [`assume_init_mut`]: MaybeUninit::assume_init_mut
    #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
    #[inline(always)]
    pub unsafe fn slice_assume_init_mut(slice: &mut [Self]) -> &mut [T] {
        // SAFETY: similar to safety notes for `slice_assume_init_ref`, but we have a
        // mutable reference which is also guaranteed to be valid for writes.
        unsafe { &mut *(slice as *mut [Self] as *mut [T]) }
    }

    /// Gets a pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
    #[inline(always)]
    pub fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
        // `MaybeUninit<T>` has the same layout as `T`, so the element pointer cast is valid.
        this.as_ptr() as *const T
    }

    /// Gets a mutable pointer to the first element of the array.
    #[unstable(feature = "maybe_uninit_slice", issue = "63569")]
    #[inline(always)]
    pub fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
        // Same layout argument as `slice_as_ptr`, for the mutable pointer.
        this.as_mut_ptr() as *mut T
    }
}
--- /dev/null
+//! Basic functions for dealing with memory.
+//!
+//! This module contains functions for querying the size and alignment of
+//! types, initializing and manipulating memory.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::clone;
+use crate::cmp;
+use crate::fmt;
+use crate::hash;
+use crate::intrinsics;
+use crate::marker::{Copy, DiscriminantKind, Sized};
+use crate::ptr;
+
+mod manually_drop;
+#[stable(feature = "manually_drop", since = "1.20.0")]
+pub use manually_drop::ManuallyDrop;
+
+mod maybe_uninit;
+#[stable(feature = "maybe_uninit", since = "1.36.0")]
+pub use maybe_uninit::MaybeUninit;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::transmute;
+
/// Takes ownership and "forgets" about the value **without running its destructor**.
///
/// Any resources the value manages, such as heap memory or a file handle, will linger
/// forever in an unreachable state. However, it does not guarantee that pointers
/// to this memory will remain valid.
///
/// * If you want to leak memory, see [`Box::leak`].
/// * If you want to obtain a raw pointer to the memory, see [`Box::into_raw`].
/// * If you want to dispose of a value properly, running its destructor, see
/// [`mem::drop`].
///
/// # Safety
///
/// `forget` is not marked as `unsafe`, because Rust's safety guarantees
/// do not include a guarantee that destructors will always run. For example,
/// a program can create a reference cycle using [`Rc`][rc], or call
/// [`process::exit`][exit] to exit without running destructors. Thus, allowing
/// `mem::forget` from safe code does not fundamentally change Rust's safety
/// guarantees.
///
/// That said, leaking resources such as memory or I/O objects is usually undesirable.
/// The need comes up in some specialized use cases for FFI or unsafe code, but even
/// then, [`ManuallyDrop`] is typically preferred.
///
/// Because forgetting a value is allowed, any `unsafe` code you write must
/// allow for this possibility. You cannot return a value and expect that the
/// caller will necessarily run the value's destructor.
///
/// [rc]: ../../std/rc/struct.Rc.html
/// [exit]: ../../std/process/fn.exit.html
///
/// # Examples
///
/// The canonical safe use of `mem::forget` is to circumvent a value's destructor
/// implemented by the `Drop` trait. For example, this will leak a `File`, i.e. reclaim
/// the space taken by the variable but never close the underlying system resource:
///
/// ```no_run
/// use std::mem;
/// use std::fs::File;
///
/// let file = File::open("foo.txt").unwrap();
/// mem::forget(file);
/// ```
///
/// This is useful when the ownership of the underlying resource was previously
/// transferred to code outside of Rust, for example by transmitting the raw
/// file descriptor to C code.
///
/// # Relationship with `ManuallyDrop`
///
/// While `mem::forget` can also be used to transfer *memory* ownership, doing so is error-prone.
/// [`ManuallyDrop`] should be used instead. Consider, for example, this code:
///
/// ```
/// use std::mem;
///
/// let mut v = vec![65, 122];
/// // Build a `String` using the contents of `v`
/// let s = unsafe { String::from_raw_parts(v.as_mut_ptr(), v.len(), v.capacity()) };
/// // leak `v` because its memory is now managed by `s`
/// mem::forget(v); // ERROR - v is invalid and must not be passed to a function
/// assert_eq!(s, "Az");
/// // `s` is implicitly dropped and its memory deallocated.
/// ```
///
/// There are two issues with the above example:
///
/// * If more code were added between the construction of `String` and the invocation of
/// `mem::forget()`, a panic within it would cause a double free because the same memory
/// is handled by both `v` and `s`.
/// * After calling `v.as_mut_ptr()` and transmitting the ownership of the data to `s`,
/// the `v` value is invalid. Even when a value is just moved to `mem::forget` (which won't
/// inspect it), some types have strict requirements on their values that
/// make them invalid when dangling or no longer owned. Using invalid values in any
/// way, including passing them to or returning them from functions, constitutes
/// undefined behavior and may break the assumptions made by the compiler.
///
/// Switching to `ManuallyDrop` avoids both issues:
///
/// ```
/// use std::mem::ManuallyDrop;
///
/// let v = vec![65, 122];
/// // Before we disassemble `v` into its raw parts, make sure it
/// // does not get dropped!
/// let mut v = ManuallyDrop::new(v);
/// // Now disassemble `v`. These operations cannot panic, so there cannot be a leak.
/// let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
/// // Finally, build a `String`.
/// let s = unsafe { String::from_raw_parts(ptr, len, cap) };
/// assert_eq!(s, "Az");
/// // `s` is implicitly dropped and its memory deallocated.
/// ```
///
/// `ManuallyDrop` robustly prevents double-free because we disable `v`'s destructor
/// before doing anything else. `mem::forget()` doesn't allow this because it consumes its
/// argument, forcing us to call it only after extracting anything we need from `v`. Even
/// if a panic were introduced between construction of `ManuallyDrop` and building the
/// string (which cannot happen in the code as shown), it would result in a leak and not a
/// double free. In other words, `ManuallyDrop` errs on the side of leaking instead of
/// erring on the side of (double-)dropping.
///
/// Also, `ManuallyDrop` prevents us from having to "touch" `v` after transferring the
/// ownership to `s` — the final step of interacting with `v` to dispose of it without
/// running its destructor is entirely avoided.
///
/// [`Box`]: ../../std/boxed/struct.Box.html
/// [`Box::leak`]: ../../std/boxed/struct.Box.html#method.leak
/// [`Box::into_raw`]: ../../std/boxed/struct.Box.html#method.into_raw
/// [`mem::drop`]: drop
/// [ub]: ../../reference/behavior-considered-undefined.html
#[inline]
#[rustc_const_stable(feature = "const_forget", since = "1.46.0")]
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn forget<T>(t: T) {
    // Wrapping in `ManuallyDrop` suppresses `t`'s destructor; the wrapper is then
    // discarded by value, so `t` is moved in but never dropped. This form is
    // const-compatible, unlike calling the `forget` intrinsic through a wrapper.
    let _ = ManuallyDrop::new(t);
}
+
/// Like [`forget`], but also accepts unsized values.
///
/// This function is just a shim intended to be removed when the `unsized_locals` feature gets
/// stabilized.
#[inline]
#[unstable(feature = "forget_unsized", issue = "none")]
pub fn forget_unsized<T: ?Sized>(t: T) {
    // SAFETY: the forget intrinsic could be safe, but there's no point in making it safe since
    // we'll be implementing this function soon via `ManuallyDrop`
    unsafe { intrinsics::forget(t) }
}
+
+/// Returns the size of a type in bytes.
+///
+/// More specifically, this is the offset in bytes between successive elements
+/// in an array with that item type including alignment padding. Thus, for any
+/// type `T` and length `n`, `[T; n]` has a size of `n * size_of::<T>()`.
+///
+/// In general, the size of a type is not stable across compilations, but
+/// specific types such as primitives are.
+///
+/// The following table gives the size for primitives.
+///
+/// Type | size_of::\<Type>()
+/// ---- | ---------------
+/// () | 0
+/// bool | 1
+/// u8 | 1
+/// u16 | 2
+/// u32 | 4
+/// u64 | 8
+/// u128 | 16
+/// i8 | 1
+/// i16 | 2
+/// i32 | 4
+/// i64 | 8
+/// i128 | 16
+/// f32 | 4
+/// f64 | 8
+/// char | 4
+///
+/// Furthermore, `usize` and `isize` have the same size.
+///
+/// The types `*const T`, `&T`, `Box<T>`, `Option<&T>`, and `Option<Box<T>>` all have
+/// the same size. If `T` is Sized, all of those types have the same size as `usize`.
+///
+/// The mutability of a pointer does not change its size. As such, `&T` and `&mut T`
+/// have the same size. Likewise for `*const T` and `*mut T`.
+///
+/// # Size of `#[repr(C)]` items
+///
+/// The `C` representation for items has a defined layout. With this layout,
+/// the size of items is also stable as long as all fields have a stable size.
+///
+/// ## Size of Structs
+///
+/// For `structs`, the size is determined by the following algorithm.
+///
+/// For each field in the struct ordered by declaration order:
+///
+/// 1. Add the size of the field.
+/// 2. Round up the current size to the nearest multiple of the next field's [alignment].
+///
+/// Finally, round the size of the struct to the nearest multiple of its [alignment].
+/// The alignment of the struct is usually the largest alignment of all its
+/// fields; this can be changed with the use of `repr(align(N))`.
+///
+/// Unlike `C`, zero sized structs are not rounded up to one byte in size.
+///
+/// ## Size of Enums
+///
+/// Enums that carry no data other than the discriminant have the same size as C enums
+/// on the platform they are compiled for.
+///
+/// ## Size of Unions
+///
+/// The size of a union is the size of its largest field.
+///
+/// Unlike `C`, zero sized unions are not rounded up to one byte in size.
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// // Some primitives
+/// assert_eq!(4, mem::size_of::<i32>());
+/// assert_eq!(8, mem::size_of::<f64>());
+/// assert_eq!(0, mem::size_of::<()>());
+///
+/// // Some arrays
+/// assert_eq!(8, mem::size_of::<[i32; 2]>());
+/// assert_eq!(12, mem::size_of::<[i32; 3]>());
+/// assert_eq!(0, mem::size_of::<[i32; 0]>());
+///
+///
+/// // Pointer size equality
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<*const i32>());
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Box<i32>>());
+/// assert_eq!(mem::size_of::<&i32>(), mem::size_of::<Option<&i32>>());
+/// assert_eq!(mem::size_of::<Box<i32>>(), mem::size_of::<Option<Box<i32>>>());
+/// ```
+///
+/// Using `#[repr(C)]`.
+///
+/// ```
+/// use std::mem;
+///
+/// #[repr(C)]
+/// struct FieldStruct {
+/// first: u8,
+/// second: u16,
+/// third: u8
+/// }
+///
+/// // The size of the first field is 1, so add 1 to the size. Size is 1.
+/// // The alignment of the second field is 2, so add 1 to the size for padding. Size is 2.
+/// // The size of the second field is 2, so add 2 to the size. Size is 4.
+/// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4.
+/// // The size of the third field is 1, so add 1 to the size. Size is 5.
+/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its
+/// // fields is 2), so add 1 to the size for padding. Size is 6.
+/// assert_eq!(6, mem::size_of::<FieldStruct>());
+///
+/// #[repr(C)]
+/// struct TupleStruct(u8, u16, u8);
+///
+/// // Tuple structs follow the same rules.
+/// assert_eq!(6, mem::size_of::<TupleStruct>());
+///
+/// // Note that reordering the fields can lower the size. We can remove both padding bytes
+/// // by putting `third` before `second`.
+/// #[repr(C)]
+/// struct FieldStructOptimized {
+/// first: u8,
+/// third: u8,
+/// second: u16
+/// }
+///
+/// assert_eq!(4, mem::size_of::<FieldStructOptimized>());
+///
+/// // Union size is the size of the largest field.
+/// #[repr(C)]
+/// union ExampleUnion {
+/// smaller: u8,
+/// larger: u16
+/// }
+///
+/// assert_eq!(2, mem::size_of::<ExampleUnion>());
+/// ```
+///
+/// [alignment]: align_of
+#[inline(always)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_size_of", since = "1.32.0")]
+pub const fn size_of<T>() -> usize {
+ // Thin wrapper over the compiler intrinsic; usable in const contexts
+ // (stabilized as `const_size_of`, see the attribute above).
+ intrinsics::size_of::<T>()
+}
+
+/// Returns the size of the pointed-to value in bytes.
+///
+/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
+/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
+/// then `size_of_val` can be used to get the dynamically-known size.
+///
+/// [slice]: ../../std/primitive.slice.html
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::size_of_val(&5i32));
+///
+/// let x: [u8; 13] = [0; 13];
+/// let y: &[u8] = &x;
+/// assert_eq!(13, mem::size_of_val(y));
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_size_of_val", issue = "46571")]
+pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
+ // The intrinsic also handles dynamically sized types (slices, trait
+ // objects), whose size is only known at runtime — see the docs above.
+ intrinsics::size_of_val(val)
+}
+
+/// Returns the size of the pointed-to value in bytes.
+///
+/// This is usually the same as `size_of::<T>()`. However, when `T` *has* no
+/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object],
+/// then `size_of_val_raw` can be used to get the dynamically-known size.
+///
+/// # Safety
+///
+/// This function is only safe to call if the following conditions hold:
+///
+/// - If `T` is `Sized`, this function is always safe to call.
+/// - If the unsized tail of `T` is:
+/// - a [slice], then the length of the slice tail must be an initialized
+/// integer, and the size of the *entire value*
+/// (dynamic tail length + statically sized prefix) must fit in `isize`.
+/// - a [trait object], then the vtable part of the pointer must point
+/// to a valid vtable acquired by an unsizing coercion, and the size
+/// of the *entire value* (dynamic tail length + statically sized prefix)
+/// must fit in `isize`.
+/// - an (unstable) [extern type], then this function is always safe to
+/// call, but may panic or otherwise return the wrong value, as the
+/// extern type's layout is not known. This is the same behavior as
+/// [`size_of_val`] on a reference to a type with an extern type tail.
+/// - otherwise, it is conservatively not allowed to call this function.
+///
+/// [slice]: ../../std/primitive.slice.html
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+/// [extern type]: ../../unstable-book/language-features/extern-types.html
+///
+/// # Examples
+///
+/// ```
+/// #![feature(layout_for_ptr)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::size_of_val(&5i32));
+///
+/// let x: [u8; 13] = [0; 13];
+/// let y: &[u8] = &x;
+/// assert_eq!(13, unsafe { mem::size_of_val_raw(y) });
+/// ```
+#[inline]
+#[unstable(feature = "layout_for_ptr", issue = "69835")]
+pub unsafe fn size_of_val_raw<T: ?Sized>(val: *const T) -> usize {
+ // SAFETY (caller contract): `val` must satisfy the conditions listed in
+ // the `# Safety` section above (valid slice length / vtable in the
+ // pointer metadata, total size fitting in `isize`).
+ intrinsics::size_of_val(val)
+}
+
+/// Returns the [ABI]-required minimum alignment of a type.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// # #![allow(deprecated)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::min_align_of::<i32>());
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(reason = "use `align_of` instead", since = "1.2.0")]
+pub fn min_align_of<T>() -> usize {
+ // Deprecated alias: forwards to the same intrinsic as `align_of`.
+ intrinsics::min_align_of::<T>()
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// # #![allow(deprecated)]
+/// use std::mem;
+///
+/// assert_eq!(4, mem::min_align_of_val(&5i32));
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(reason = "use `align_of_val` instead", since = "1.2.0")]
+pub fn min_align_of_val<T: ?Sized>(val: &T) -> usize {
+ // Deprecated alias: forwards to the same intrinsic as `align_of_val`.
+ intrinsics::min_align_of_val(val)
+}
+
+/// Returns the [ABI]-required minimum alignment of a type.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// This is the alignment used for struct fields. It may be smaller than the preferred alignment.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::align_of::<i32>());
+/// ```
+#[inline(always)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_align_of", since = "1.32.0")]
+pub const fn align_of<T>() -> usize {
+ // Despite the `min_` in the intrinsic's name, this is the ABI-required
+ // alignment documented above (the same intrinsic backs `min_align_of`).
+ intrinsics::min_align_of::<T>()
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// assert_eq!(4, mem::align_of_val(&5i32));
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
+#[allow(deprecated)]
+pub const fn align_of_val<T: ?Sized>(val: &T) -> usize {
+ // Same intrinsic as the deprecated `min_align_of_val`; works for
+ // dynamically sized `T` because it takes a reference, not a type.
+ intrinsics::min_align_of_val(val)
+}
+
+/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to.
+///
+/// Every reference to a value of the type `T` must be a multiple of this number.
+///
+/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface
+///
+/// # Safety
+///
+/// This function is only safe to call if the following conditions hold:
+///
+/// - If `T` is `Sized`, this function is always safe to call.
+/// - If the unsized tail of `T` is:
+/// - a [slice], then the length of the slice tail must be an initialized
+/// integer, and the size of the *entire value*
+/// (dynamic tail length + statically sized prefix) must fit in `isize`.
+/// - a [trait object], then the vtable part of the pointer must point
+/// to a valid vtable acquired by an unsizing coercion, and the size
+/// of the *entire value* (dynamic tail length + statically sized prefix)
+/// must fit in `isize`.
+/// - an (unstable) [extern type], then this function is always safe to
+/// call, but may panic or otherwise return the wrong value, as the
+/// extern type's layout is not known. This is the same behavior as
+/// [`align_of_val`] on a reference to a type with an extern type tail.
+/// - otherwise, it is conservatively not allowed to call this function.
+///
+/// [slice]: ../../std/primitive.slice.html
+/// [trait object]: ../../book/ch17-02-trait-objects.html
+/// [extern type]: ../../unstable-book/language-features/extern-types.html
+///
+/// # Examples
+///
+/// ```
+/// #![feature(layout_for_ptr)]
+/// use std::mem;
+///
+/// assert_eq!(4, unsafe { mem::align_of_val_raw(&5i32) });
+/// ```
+#[inline]
+#[unstable(feature = "layout_for_ptr", issue = "69835")]
+pub unsafe fn align_of_val_raw<T: ?Sized>(val: *const T) -> usize {
+ // SAFETY (caller contract): `val` must satisfy the conditions listed in
+ // the `# Safety` section above.
+ intrinsics::min_align_of_val(val)
+}
+
+/// Returns `true` if dropping values of type `T` matters.
+///
+/// This is purely an optimization hint, and may be implemented conservatively:
+/// it may return `true` for types that don't actually need to be dropped.
+/// As such always returning `true` would be a valid implementation of
+/// this function. However if this function actually returns `false`, then you
+/// can be certain dropping `T` has no side effect.
+///
+/// Low level implementations of things like collections, which need to manually
+/// drop their data, should use this function to avoid unnecessarily
+/// trying to drop all their contents when they are destroyed. This might not
+/// make a difference in release builds (where a loop that has no side-effects
+/// is easily detected and eliminated), but is often a big win for debug builds.
+///
+/// Note that [`drop_in_place`] already performs this check, so if your workload
+/// can be reduced to some small number of [`drop_in_place`] calls, using this is
+/// unnecessary. In particular note that you can [`drop_in_place`] a slice, and that
+/// will do a single needs_drop check for all the values.
+///
+/// Types like Vec therefore just `drop_in_place(&mut self[..])` without using
+/// `needs_drop` explicitly. Types like [`HashMap`], on the other hand, have to drop
+/// values one at a time and should use this API.
+///
+/// [`drop_in_place`]: crate::ptr::drop_in_place
+/// [`HashMap`]: ../../std/collections/struct.HashMap.html
+///
+/// # Examples
+///
+/// Here's an example of how a collection might make use of `needs_drop`:
+///
+/// ```
+/// use std::{mem, ptr};
+///
+/// pub struct MyCollection<T> {
+/// # data: [T; 1],
+/// /* ... */
+/// }
+/// # impl<T> MyCollection<T> {
+/// # fn iter_mut(&mut self) -> &mut [T] { &mut self.data }
+/// # fn free_buffer(&mut self) {}
+/// # }
+///
+/// impl<T> Drop for MyCollection<T> {
+/// fn drop(&mut self) {
+/// unsafe {
+/// // drop the data
+/// if mem::needs_drop::<T>() {
+/// for x in self.iter_mut() {
+/// ptr::drop_in_place(x);
+/// }
+/// }
+/// self.free_buffer();
+/// }
+/// }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "needs_drop", since = "1.21.0")]
+#[rustc_const_stable(feature = "const_needs_drop", since = "1.36.0")]
+#[rustc_diagnostic_item = "needs_drop"]
+pub const fn needs_drop<T>() -> bool {
+ // Compile-time query; per the docs above it may conservatively answer
+ // `true`, but a `false` answer is a guarantee.
+ intrinsics::needs_drop::<T>()
+}
+
+/// Returns the value of type `T` represented by the all-zero byte-pattern.
+///
+/// This means that, for example, the padding byte in `(u8, u16)` is not
+/// necessarily zeroed.
+///
+/// There is no guarantee that an all-zero byte-pattern represents a valid value
+/// of some type `T`. For example, the all-zero byte-pattern is not a valid value
+/// for reference types (`&T`, `&mut T`) and functions pointers. Using `zeroed`
+/// on such types causes immediate [undefined behavior][ub] because [the Rust
+/// compiler assumes][inv] that there always is a valid value in a variable it
+/// considers initialized.
+///
+/// This has the same effect as [`MaybeUninit::zeroed().assume_init()`][zeroed].
+/// It is useful for FFI sometimes, but should generally be avoided.
+///
+/// [zeroed]: MaybeUninit::zeroed
+/// [ub]: ../../reference/behavior-considered-undefined.html
+/// [inv]: MaybeUninit#initialization-invariant
+///
+/// # Examples
+///
+/// Correct usage of this function: initializing an integer with zero.
+///
+/// ```
+/// use std::mem;
+///
+/// let x: i32 = unsafe { mem::zeroed() };
+/// assert_eq!(0, x);
+/// ```
+///
+/// *Incorrect* usage of this function: initializing a reference with zero.
+///
+/// ```rust,no_run
+/// # #![allow(invalid_value)]
+/// use std::mem;
+///
+/// let _x: &i32 = unsafe { mem::zeroed() }; // Undefined behavior!
+/// let _y: fn() = unsafe { mem::zeroed() }; // And again!
+/// ```
+#[inline(always)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated_in_future)]
+#[allow(deprecated)]
+#[rustc_diagnostic_item = "mem_zeroed"]
+pub unsafe fn zeroed<T>() -> T {
+ // SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
+ unsafe {
+ // Lets the compiler reject uses where an all-zero pattern is known to
+ // be invalid for `T` (e.g. references) before producing the value.
+ intrinsics::assert_zero_valid::<T>();
+ MaybeUninit::zeroed().assume_init()
+ }
+}
+
+/// Bypasses Rust's normal memory-initialization checks by pretending to
+/// produce a value of type `T`, while doing nothing at all.
+///
+/// **This function is deprecated.** Use [`MaybeUninit<T>`] instead.
+///
+/// The reason for deprecation is that the function basically cannot be used
+/// correctly: it has the same effect as [`MaybeUninit::uninit().assume_init()`][uninit].
+/// As the [`assume_init` documentation][assume_init] explains,
+/// [the Rust compiler assumes][inv] that values are properly initialized.
+/// As a consequence, calling e.g. `mem::uninitialized::<bool>()` causes immediate
+/// undefined behavior for returning a `bool` that is not definitely either `true`
+/// or `false`. Worse, truly uninitialized memory like what gets returned here
+/// is special in that the compiler knows that it does not have a fixed value.
+/// This makes it undefined behavior to have uninitialized data in a variable even
+/// if that variable has an integer type.
+/// (Notice that the rules around uninitialized integers are not finalized yet, but
+/// until they are, it is advisable to avoid them.)
+///
+/// [`MaybeUninit<T>`]: MaybeUninit
+/// [uninit]: MaybeUninit::uninit
+/// [assume_init]: MaybeUninit::assume_init
+/// [inv]: MaybeUninit#initialization-invariant
+#[inline(always)]
+#[rustc_deprecated(since = "1.39.0", reason = "use `mem::MaybeUninit` instead")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated_in_future)]
+#[allow(deprecated)]
+#[rustc_diagnostic_item = "mem_uninitialized"]
+pub unsafe fn uninitialized<T>() -> T {
+ // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
+ unsafe {
+ // Lets the compiler reject uses where uninitialized data is known to
+ // be invalid for `T` before producing the value.
+ intrinsics::assert_uninit_valid::<T>();
+ MaybeUninit::uninit().assume_init()
+ }
+}
+
+/// Swaps the values at two mutable locations, without deinitializing either one.
+///
+/// * If you want to swap with a default or dummy value, see [`take`].
+/// * If you want to swap with a passed value, returning the old value, see [`replace`].
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// let mut x = 5;
+/// let mut y = 42;
+///
+/// mem::swap(&mut x, &mut y);
+///
+/// assert_eq!(42, x);
+/// assert_eq!(5, y);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn swap<T>(x: &mut T, y: &mut T) {
+ // SAFETY: the raw pointers have been created from safe mutable references satisfying all the
+ // constraints on `ptr::swap_nonoverlapping_one`
+ // (two live `&mut T` references can never alias, so the two regions
+ // are guaranteed not to overlap).
+ unsafe {
+ ptr::swap_nonoverlapping_one(x, y);
+ }
+}
+
+/// Replaces `dest` with the default value of `T`, returning the previous `dest` value.
+///
+/// * If you want to replace the values of two variables, see [`swap`].
+/// * If you want to replace with a passed value instead of the default value, see [`replace`].
+///
+/// # Examples
+///
+/// A simple example:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v: Vec<i32> = vec![1, 2];
+///
+/// let old_v = mem::take(&mut v);
+/// assert_eq!(vec![1, 2], old_v);
+/// assert!(v.is_empty());
+/// ```
+///
+/// `take` allows taking ownership of a struct field by replacing it with an "empty" value.
+/// Without `take` you can run into issues like these:
+///
+/// ```compile_fail,E0507
+/// struct Buffer<T> { buf: Vec<T> }
+///
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// // error: cannot move out of dereference of `&mut`-pointer
+/// let buf = self.buf;
+/// self.buf = Vec::new();
+/// buf
+/// }
+/// }
+/// ```
+///
+/// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset
+/// `self.buf`. But `take` can be used to disassociate the original value of `self.buf` from
+/// `self`, allowing it to be returned:
+///
+/// ```
+/// use std::mem;
+///
+/// # struct Buffer<T> { buf: Vec<T> }
+/// impl<T> Buffer<T> {
+/// fn get_and_reset(&mut self) -> Vec<T> {
+/// mem::take(&mut self.buf)
+/// }
+/// }
+///
+/// let mut buffer = Buffer { buf: vec![0, 1] };
+/// assert_eq!(buffer.buf.len(), 2);
+///
+/// assert_eq!(buffer.get_and_reset(), vec![0, 1]);
+/// assert_eq!(buffer.buf.len(), 0);
+/// ```
+#[inline]
+#[stable(feature = "mem_take", since = "1.40.0")]
+pub fn take<T: Default>(dest: &mut T) -> T {
+ // Swap `T::default()` into `dest` and hand back whatever was there.
+ replace(dest, T::default())
+}
+
+/// Moves `src` into the referenced `dest`, returning the previous `dest` value.
+///
+/// Neither value is dropped.
+///
+/// * If you want to replace the values of two variables, see [`swap`].
+/// * If you want to replace with a default value, see [`take`].
+///
+/// # Examples
+///
+/// A simple example:
+///
+/// ```
+/// use std::mem;
+///
+/// let mut v: Vec<i32> = vec![1, 2];
+///
+/// let old_v = mem::replace(&mut v, vec![3, 4, 5]);
+/// assert_eq!(vec![1, 2], old_v);
+/// assert_eq!(vec![3, 4, 5], v);
+/// ```
+///
+/// `replace` allows consumption of a struct field by replacing it with another value.
+/// Without `replace` you can run into issues like these:
+///
+/// ```compile_fail,E0507
+/// struct Buffer<T> { buf: Vec<T> }
+///
+/// impl<T> Buffer<T> {
+/// fn replace_index(&mut self, i: usize, v: T) -> T {
+/// // error: cannot move out of dereference of `&mut`-pointer
+/// let t = self.buf[i];
+/// self.buf[i] = v;
+/// t
+/// }
+/// }
+/// ```
+///
+/// Note that `T` does not necessarily implement [`Clone`], so we can't even clone `self.buf[i]` to
+/// avoid the move. But `replace` can be used to disassociate the original value at that index from
+/// `self`, allowing it to be returned:
+///
+/// ```
+/// # #![allow(dead_code)]
+/// use std::mem;
+///
+/// # struct Buffer<T> { buf: Vec<T> }
+/// impl<T> Buffer<T> {
+/// fn replace_index(&mut self, i: usize, v: T) -> T {
+/// mem::replace(&mut self.buf[i], v)
+/// }
+/// }
+///
+/// let mut buffer = Buffer { buf: vec![0, 1] };
+/// assert_eq!(buffer.buf[0], 0);
+///
+/// assert_eq!(buffer.replace_index(0, 2), 0);
+/// assert_eq!(buffer.buf[0], 2);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "if you don't need the old value, you can just assign the new value directly"]
+pub fn replace<T>(dest: &mut T, mut src: T) -> T {
+ // After the swap the local `src` holds the old `dest` value; returning
+ // it by value means neither value is dropped here (see docs above).
+ swap(dest, &mut src);
+ src
+}
+
+/// Disposes of a value.
+///
+/// This does so by calling the argument's implementation of [`Drop`][drop].
+///
+/// This effectively does nothing for types which implement `Copy`, e.g.
+/// integers. Such values are copied and _then_ moved into the function, so the
+/// value persists after this function call.
+///
+/// This function is not magic; it is literally defined as
+///
+/// ```
+/// pub fn drop<T>(_x: T) { }
+/// ```
+///
+/// Because `_x` is moved into the function, it is automatically dropped before
+/// the function returns.
+///
+/// [drop]: Drop
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+///
+/// drop(v); // explicitly drop the vector
+/// ```
+///
+/// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can
+/// release a [`RefCell`] borrow:
+///
+/// ```
+/// use std::cell::RefCell;
+///
+/// let x = RefCell::new(1);
+///
+/// let mut mutable_borrow = x.borrow_mut();
+/// *mutable_borrow = 1;
+///
+/// drop(mutable_borrow); // relinquish the mutable borrow on this slot
+///
+/// let borrow = x.borrow();
+/// println!("{}", *borrow);
+/// ```
+///
+/// Integers and other types implementing [`Copy`] are unaffected by `drop`.
+///
+/// ```
+/// #[derive(Copy, Clone)]
+/// struct Foo(u8);
+///
+/// let x = 1;
+/// let y = Foo(2);
+/// drop(x); // a copy of `x` is moved and dropped
+/// drop(y); // a copy of `y` is moved and dropped
+///
+/// println!("x: {}, y: {}", x, y.0); // still available
+/// ```
+///
+/// [`RefCell`]: crate::cell::RefCell
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+// Taking `_x` by value moves it into this (empty) body, so it is dropped
+// when the function returns — no explicit code is needed.
+pub fn drop<T>(_x: T) {}
+
+/// Interprets `src` as having type `&U`, and then reads `src` without moving
+/// the contained value.
+///
+/// This function will unsafely assume the pointer `src` is valid for [`size_of::<U>`][size_of]
+/// bytes by transmuting `&T` to `&U` and then reading the `&U` (except that this is done in a way
+/// that is correct even when `&U` makes stricter alignment requirements than `&T`). It will also
+/// unsafely create a copy of the contained value instead of moving out of `src`.
+///
+/// It is not a compile-time error if `T` and `U` have different sizes, but it
+/// is highly encouraged to only invoke this function where `T` and `U` have the
+/// same size. This function triggers [undefined behavior][ub] if `U` is larger than
+/// `T`.
+///
+/// [ub]: ../../reference/behavior-considered-undefined.html
+///
+/// # Examples
+///
+/// ```
+/// use std::mem;
+///
+/// #[repr(packed)]
+/// struct Foo {
+/// bar: u8,
+/// }
+///
+/// let foo_array = [10u8];
+///
+/// unsafe {
+/// // Copy the data from 'foo_array' and treat it as a 'Foo'
+/// let mut foo_struct: Foo = mem::transmute_copy(&foo_array);
+/// assert_eq!(foo_struct.bar, 10);
+///
+/// // Modify the copied data
+/// foo_struct.bar = 20;
+/// assert_eq!(foo_struct.bar, 20);
+/// }
+///
+/// // The contents of 'foo_array' should not have changed
+/// assert_eq!(foo_array, [10]);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
+ // Note: no size check is performed here; as documented above, it is
+ // undefined behavior if `U` is larger than `T`.
+ // If U has a higher alignment requirement, src may not be suitably aligned.
+ if align_of::<U>() > align_of::<T>() {
+ // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+ // The caller must guarantee that the actual transmutation is safe.
+ unsafe { ptr::read_unaligned(src as *const T as *const U) }
+ } else {
+ // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+ // We just checked that `src as *const U` was properly aligned.
+ // The caller must guarantee that the actual transmutation is safe.
+ unsafe { ptr::read(src as *const T as *const U) }
+ }
+}
+
+/// Opaque type representing the discriminant of an enum.
+///
+/// See the [`discriminant`] function in this module for more information.
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// The single (private) field is the compiler-chosen discriminant type for
+// `T`, obtained via the `DiscriminantKind` trait; keeping it private makes
+// the type opaque to users.
+pub struct Discriminant<T>(<T as DiscriminantKind>::Discriminant);
+
+// N.B. These trait implementations cannot be derived because we don't want any bounds on T.
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// Manual impl: `derive(Copy)` would add a `T: Copy` bound, which we don't want.
+impl<T> Copy for Discriminant<T> {}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// Manual impl: `derive(Clone)` would add a `T: Clone` bound.
+impl<T> clone::Clone for Discriminant<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// Equality compares only the discriminant value, never `T`'s data.
+impl<T> cmp::PartialEq for Discriminant<T> {
+ fn eq(&self, rhs: &Self) -> bool {
+ self.0 == rhs.0
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+impl<T> cmp::Eq for Discriminant<T> {}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// Hashes the underlying discriminant value directly.
+impl<T> hash::Hash for Discriminant<T> {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.0.hash(state);
+ }
+}
+
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+// Debug-prints as `Discriminant(<value>)` without exposing the concrete type.
+impl<T> fmt::Debug for Discriminant<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Discriminant").field(&self.0).finish()
+ }
+}
+
+/// Returns a value uniquely identifying the enum variant in `v`.
+///
+/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
+/// return value is unspecified.
+///
+/// # Stability
+///
+/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
+/// of some variant will not change between compilations with the same compiler.
+///
+/// # Examples
+///
+/// This can be used to compare enums that carry data, while disregarding
+/// the actual data:
+///
+/// ```
+/// use std::mem;
+///
+/// enum Foo { A(&'static str), B(i32), C(i32) }
+///
+/// assert_eq!(mem::discriminant(&Foo::A("bar")), mem::discriminant(&Foo::A("baz")));
+/// assert_eq!(mem::discriminant(&Foo::B(1)), mem::discriminant(&Foo::B(2)));
+/// assert_ne!(mem::discriminant(&Foo::B(3)), mem::discriminant(&Foo::C(3)));
+/// ```
+#[stable(feature = "discriminant_value", since = "1.21.0")]
+#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
+pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
+ // Wrap the intrinsic's result in the opaque `Discriminant` type so the
+ // concrete discriminant representation stays hidden from callers.
+ Discriminant(intrinsics::discriminant_value(v))
+}
+
+/// Returns the number of variants in the enum type `T`.
+///
+/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
+/// return value is unspecified. Equally, if `T` is an enum with more variants than `usize::MAX`
+/// the return value is unspecified. Uninhabited variants will be counted.
+///
+/// # Examples
+///
+/// ```
+/// # #![feature(never_type)]
+/// # #![feature(variant_count)]
+///
+/// use std::mem;
+///
+/// enum Void {}
+/// enum Foo { A(&'static str), B(i32), C(i32) }
+///
+/// assert_eq!(mem::variant_count::<Void>(), 0);
+/// assert_eq!(mem::variant_count::<Foo>(), 3);
+///
+/// assert_eq!(mem::variant_count::<Option<!>>(), 2);
+/// assert_eq!(mem::variant_count::<Result<!, !>>(), 2);
+/// ```
+#[inline(always)]
+#[unstable(feature = "variant_count", issue = "73662")]
+#[rustc_const_unstable(feature = "variant_count", issue = "73662")]
+pub const fn variant_count<T>() -> usize {
+ // Compile-time query; counts uninhabited variants too (see docs above).
+ intrinsics::variant_count::<T>()
+}
--- /dev/null
+//! Custom arbitrary-precision number (bignum) implementation.
+//!
+//! This is designed to avoid the heap allocation at expense of stack memory.
+//! The most used bignum type, `Big32x40`, is limited by 32 × 40 = 1,280 bits
+//! and will take at most 160 bytes of stack memory. This is more than enough
+//! for round-tripping all possible finite `f64` values.
+//!
+//! In principle it is possible to have multiple bignum types for different
+//! inputs, but we don't do so to avoid the code bloat. Each bignum is still
+//! tracked for the actual usages, so it normally doesn't matter.
+
+// This module is only for dec2flt and flt2dec, and only public because of coretests.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(
+ feature = "core_private_bignum",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+#![macro_use]
+
+use crate::intrinsics;
+
+/// Arithmetic operations required by bignums.
+///
+/// In the equations below, a primed name such as `carry'` denotes an
+/// *output* value, while the unprimed name is the corresponding input.
+pub trait FullOps: Sized {
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self + other + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_add(self, other: Self, carry: bool) -> (bool /* carry */, Self);
+
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_mul(self, other: Self, carry: Self) -> (Self /* carry */, Self);
+
+ /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + other2 + carry`,
+ /// where `W` is the number of bits in `Self`.
+ fn full_mul_add(self, other: Self, other2: Self, carry: Self) -> (Self /* carry */, Self);
+
+ /// Returns `(quo, rem)` such that `borrow * 2^W + self = quo * other + rem`
+ /// and `0 <= rem < other`, where `W` is the number of bits in `Self`.
+ fn full_div_rem(self, other: Self, borrow: Self)
+ -> (Self /* quotient */, Self /* remainder */);
+}
+
+// Implements `FullOps` for a digit type `$ty` by widening to the next-larger
+// type `$bigty` for mul/div. Note that the `$addfn` argument is accepted for
+// documentation purposes but not used in the expansion: addition goes through
+// `intrinsics::add_with_overflow` directly.
+macro_rules! impl_full_ops {
+ ($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
+ $(
+ impl FullOps for $ty {
+ fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
+ // This cannot overflow; the output is between `0` and `2 * 2^nbits - 1`.
+ // FIXME: will LLVM optimize this into ADC or similar?
+ let (v, carry1) = intrinsics::add_with_overflow(self, other);
+ let (v, carry2) = intrinsics::add_with_overflow(v, if carry {1} else {0});
+ (carry1 || carry2, v)
+ }
+
+ fn full_mul(self, other: $ty, carry: $ty) -> ($ty, $ty) {
+ // This cannot overflow;
+ // the output is between `0` and `2^nbits * (2^nbits - 1)`.
+ // FIXME: will LLVM optimize this into ADC or similar?
+ let v = (self as $bigty) * (other as $bigty) + (carry as $bigty);
+ // High half is the carry-out, low half is the digit.
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
+ }
+
+ fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
+ // This cannot overflow;
+ // the output is between `0` and `2^nbits * (2^nbits - 1)`.
+ let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
+ (carry as $bigty);
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
+ }
+
+ fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
+ debug_assert!(borrow < other);
+ // This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
+ // Form the double-width dividend `borrow:self`, then split into quo/rem.
+ let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
+ let rhs = other as $bigty;
+ ((lhs / rhs) as $ty, (lhs % rhs) as $ty)
+ }
+ }
+ )*
+ )
+}
+
+// Generate `FullOps` impls; each line names the digit type and the
+// double-width type used for mul/div.
+impl_full_ops! {
+ u8: add(intrinsics::u8_add_with_overflow), mul/div(u16);
+ u16: add(intrinsics::u16_add_with_overflow), mul/div(u32);
+ u32: add(intrinsics::u32_add_with_overflow), mul/div(u64);
+ // See RFC #521 for enabling this.
+ // u64: add(intrinsics::u64_add_with_overflow), mul/div(u128);
+}
+
+/// Table of powers of 5 representable in digits. Specifically, the largest {u8, u16, u32} value
+/// that's a power of five, plus the corresponding exponent. Used in `mul_pow5`.
+///
+/// Each entry is a `(value, exponent)` pair with `value == 5^exponent`
+/// (125 = 5^3, 15625 = 5^6, 1_220_703_125 = 5^13).
+const SMALL_POW5: [(u64, usize); 3] = [(125, 3), (15625, 6), (1_220_703_125, 13)];
+
+// Generates a fixed-capacity bignum type `$name` backed by `$n` digits of type `$ty`.
+macro_rules! define_bignum {
+ ($name:ident: type=$ty:ty, n=$n:expr) => {
+ /// Stack-allocated arbitrary-precision (up to certain limit) integer.
+ ///
+ /// This is backed by a fixed-size array of given type ("digit").
+ /// While the array is not very large (normally some hundred bytes),
+ /// copying it recklessly may result in a performance hit.
+ /// Thus this is intentionally not `Copy`.
+ ///
+ /// All operations available to bignums panic in the case of overflows.
+ /// The caller is responsible for using large enough bignum types.
+ pub struct $name {
+ /// One plus the offset to the maximum "digit" in use.
+ /// This does not decrease, so be aware of the computation order.
+ /// `base[size..]` should be zero.
+ size: usize,
+ /// Digits. `[a, b, c, ...]` represents `a + b*2^W + c*2^(2W) + ...`
+ /// where `W` is the number of bits in the digit type.
+ base: [$ty; $n],
+ }
+
+ impl $name {
+ /// Makes a bignum from one digit.
+ pub fn from_small(v: $ty) -> $name {
+ let mut base = [0; $n];
+ base[0] = v;
+ $name { size: 1, base: base }
+ }
+
+ /// Makes a bignum from `u64` value.
+ pub fn from_u64(mut v: u64) -> $name {
+ let mut base = [0; $n];
+ let mut sz = 0;
+ // Peel off one digit at a time, least significant first.
+ while v > 0 {
+ base[sz] = v as $ty;
+ v >>= <$ty>::BITS;
+ sz += 1;
+ }
+ $name { size: sz, base: base }
+ }
+
+ /// Returns the internal digits as a slice `[a, b, c, ...]` such that the numeric
+ /// value is `a + b * 2^W + c * 2^(2W) + ...` where `W` is the number of bits in
+ /// the digit type.
+ pub fn digits(&self) -> &[$ty] {
+ &self.base[..self.size]
+ }
+
+ /// Returns the `i`-th bit where bit 0 is the least significant one.
+ /// In other words, the bit with weight `2^i`.
+ pub fn get_bit(&self, i: usize) -> u8 {
+ let digitbits = <$ty>::BITS as usize;
+ let d = i / digitbits;
+ let b = i % digitbits;
+ ((self.base[d] >> b) & 1) as u8
+ }
+
+ /// Returns `true` if the bignum is zero.
+ pub fn is_zero(&self) -> bool {
+ self.digits().iter().all(|&v| v == 0)
+ }
+
+ /// Returns the number of bits necessary to represent this value. Note that zero
+ /// is considered to need 0 bits.
+ pub fn bit_length(&self) -> usize {
+ // Skip over the most significant digits which are zero.
+ let digits = self.digits();
+ let zeros = digits.iter().rev().take_while(|&&x| x == 0).count();
+ let end = digits.len() - zeros;
+ let nonzero = &digits[..end];
+
+ if nonzero.is_empty() {
+ // There are no non-zero digits, i.e., the number is zero.
+ return 0;
+ }
+ // This could be optimized with leading_zeros() and bit shifts, but that's
+ // probably not worth the hassle.
+ let digitbits = <$ty>::BITS as usize;
+ // Scan down from the top bit of the highest non-zero digit; the loop
+ // terminates because `nonzero` ends in a non-zero digit.
+ let mut i = nonzero.len() * digitbits - 1;
+ while self.get_bit(i) == 0 {
+ i -= 1;
+ }
+ i + 1
+ }
+
+ /// Adds `other` to itself and returns its own mutable reference.
+ ///
+ /// Panics (via out-of-bounds indexing) if the result needs more than `$n` digits.
+ pub fn add<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use crate::cmp;
+ use crate::num::bignum::FullOps;
+
+ let mut sz = cmp::max(self.size, other.size);
+ let mut carry = false;
+ // Ripple-carry addition over the digit arrays, least significant first.
+ for (a, b) in self.base[..sz].iter_mut().zip(&other.base[..sz]) {
+ let (c, v) = (*a).full_add(*b, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry {
+ self.base[sz] = 1;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ /// Adds a digit-sized `other` to itself and returns its own mutable reference.
+ pub fn add_small(&mut self, other: $ty) -> &mut $name {
+ use crate::num::bignum::FullOps;
+
+ let (mut carry, v) = self.base[0].full_add(other, false);
+ self.base[0] = v;
+ let mut i = 1;
+ // Propagate the carry upwards as far as it goes.
+ while carry {
+ let (c, v) = self.base[i].full_add(0, carry);
+ self.base[i] = v;
+ carry = c;
+ i += 1;
+ }
+ if i > self.size {
+ self.size = i;
+ }
+ self
+ }
+
+ /// Subtracts `other` from itself and returns its own mutable reference.
+ ///
+ /// Panics if `other` is greater than `self` (the result would be negative).
+ pub fn sub<'a>(&'a mut self, other: &$name) -> &'a mut $name {
+ use crate::cmp;
+ use crate::num::bignum::FullOps;
+
+ let sz = cmp::max(self.size, other.size);
+ let mut noborrow = true;
+ // a - b == a + !b + 1 (two's complement); `noborrow` doubles as the "+ 1".
+ for (a, b) in self.base[..sz].iter_mut().zip(&other.base[..sz]) {
+ let (c, v) = (*a).full_add(!*b, noborrow);
+ *a = v;
+ noborrow = c;
+ }
+ // A final borrow means `other > self`, which is not supported.
+ assert!(noborrow);
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by a digit-sized `other` and returns its own
+ /// mutable reference.
+ pub fn mul_small(&mut self, other: $ty) -> &mut $name {
+ use crate::num::bignum::FullOps;
+
+ let mut sz = self.size;
+ let mut carry = 0;
+ // Schoolbook multiply-by-digit with carry propagation.
+ for a in &mut self.base[..sz] {
+ let (c, v) = (*a).full_mul(other, carry);
+ *a = v;
+ carry = c;
+ }
+ if carry > 0 {
+ self.base[sz] = carry;
+ sz += 1;
+ }
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `2^bits` and returns its own mutable reference.
+ pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
+ let digitbits = <$ty>::BITS as usize;
+ // Split the shift into a whole-digit part and a sub-digit part.
+ let digits = bits / digitbits;
+ let bits = bits % digitbits;
+
+ // The shifted result must still fit into the `$n`-digit array.
+ assert!(digits < $n);
+ debug_assert!(self.base[$n - digits..].iter().all(|&v| v == 0));
+ debug_assert!(bits == 0 || (self.base[$n - digits - 1] >> (digitbits - bits)) == 0);
+
+ // shift by `digits * digitbits` bits
+ for i in (0..self.size).rev() {
+ self.base[i + digits] = self.base[i];
+ }
+ for i in 0..digits {
+ self.base[i] = 0;
+ }
+
+ // shift by `bits` bits
+ let mut sz = self.size + digits;
+ if bits > 0 {
+ let last = sz;
+ // Bits shifted out of the current top digit form a new top digit.
+ let overflow = self.base[last - 1] >> (digitbits - bits);
+ if overflow > 0 {
+ self.base[last] = overflow;
+ sz += 1;
+ }
+ for i in (digits + 1..last).rev() {
+ self.base[i] =
+ (self.base[i] << bits) | (self.base[i - 1] >> (digitbits - bits));
+ }
+ self.base[digits] <<= bits;
+ // self.base[..digits] is zero, no need to shift
+ }
+
+ self.size = sz;
+ self
+ }
+
+ /// Multiplies itself by `5^e` and returns its own mutable reference.
+ pub fn mul_pow5(&mut self, mut e: usize) -> &mut $name {
+ use crate::mem;
+ use crate::num::bignum::SMALL_POW5;
+
+ // There are exactly n trailing zeros on 2^n, and the only relevant digit sizes
+ // are consecutive powers of two, so this is a well-suited index for the table.
+ let table_index = mem::size_of::<$ty>().trailing_zeros() as usize;
+ let (small_power, small_e) = SMALL_POW5[table_index];
+ let small_power = small_power as $ty;
+
+ // Multiply with the largest single-digit power as long as possible ...
+ while e >= small_e {
+ self.mul_small(small_power);
+ e -= small_e;
+ }
+
+ // ... then finish off the remainder.
+ let mut rest_power = 1;
+ for _ in 0..e {
+ rest_power *= 5;
+ }
+ self.mul_small(rest_power);
+
+ self
+ }
+
+ /// Multiplies itself by a number described by `other[0] + other[1] * 2^W +
+ /// other[2] * 2^(2W) + ...` (where `W` is the number of bits in the digit type)
+ /// and returns its own mutable reference.
+ pub fn mul_digits<'a>(&'a mut self, other: &[$ty]) -> &'a mut $name {
+ // the internal routine. works best when aa.len() <= bb.len().
+ fn mul_inner(ret: &mut [$ty; $n], aa: &[$ty], bb: &[$ty]) -> usize {
+ use crate::num::bignum::FullOps;
+
+ let mut retsz = 0;
+ // Schoolbook O(len(aa) * len(bb)) multiplication, accumulating into `ret`.
+ for (i, &a) in aa.iter().enumerate() {
+ if a == 0 {
+ continue;
+ }
+ let mut sz = bb.len();
+ let mut carry = 0;
+ for (j, &b) in bb.iter().enumerate() {
+ let (c, v) = a.full_mul_add(b, ret[i + j], carry);
+ ret[i + j] = v;
+ carry = c;
+ }
+ if carry > 0 {
+ ret[i + sz] = carry;
+ sz += 1;
+ }
+ if retsz < i + sz {
+ retsz = i + sz;
+ }
+ }
+ retsz
+ }
+
+ let mut ret = [0; $n];
+ // Put the shorter operand on the outer loop (fewer passes over `ret`).
+ let retsz = if self.size < other.len() {
+ mul_inner(&mut ret, &self.digits(), other)
+ } else {
+ mul_inner(&mut ret, other, &self.digits())
+ };
+ self.base = ret;
+ self.size = retsz;
+ self
+ }
+
+ /// Divides itself by a digit-sized `other` and returns its own
+ /// mutable reference *and* the remainder.
+ ///
+ /// Panics if `other` is zero.
+ pub fn div_rem_small(&mut self, other: $ty) -> (&mut $name, $ty) {
+ use crate::num::bignum::FullOps;
+
+ assert!(other > 0);
+
+ let sz = self.size;
+ let mut borrow = 0;
+ // Long division, most significant digit first; `borrow` carries the
+ // running remainder down to the next digit.
+ for a in self.base[..sz].iter_mut().rev() {
+ let (q, r) = (*a).full_div_rem(other, borrow);
+ *a = q;
+ borrow = r;
+ }
+ (self, borrow)
+ }
+
+ /// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
+ /// remainder.
+ ///
+ /// Panics if `d` is zero.
+ pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
+ // Stupid slow base-2 long division taken from
+ // https://en.wikipedia.org/wiki/Division_algorithm
+ // FIXME use a greater base ($ty) for the long division.
+ assert!(!d.is_zero());
+ let digitbits = <$ty>::BITS as usize;
+ // Clear out both output operands before accumulating into them.
+ for digit in &mut q.base[..] {
+ *digit = 0;
+ }
+ for digit in &mut r.base[..] {
+ *digit = 0;
+ }
+ r.size = d.size;
+ q.size = 1;
+ let mut q_is_zero = true;
+ let end = self.bit_length();
+ // Feed the dividend into `r` one bit at a time, most significant first.
+ for i in (0..end).rev() {
+ r.mul_pow2(1);
+ r.base[0] |= self.get_bit(i) as $ty;
+ if &*r >= d {
+ r.sub(d);
+ // Set bit `i` of q to 1.
+ let digit_idx = i / digitbits;
+ let bit_idx = i % digitbits;
+ if q_is_zero {
+ // First quotient bit fixes the final size of `q`.
+ q.size = digit_idx + 1;
+ q_is_zero = false;
+ }
+ q.base[digit_idx] |= 1 << bit_idx;
+ }
+ }
+ debug_assert!(q.base[q.size..].iter().all(|&d| d == 0));
+ debug_assert!(r.base[r.size..].iter().all(|&d| d == 0));
+ }
+ }
+
+ impl crate::cmp::PartialEq for $name {
+ fn eq(&self, other: &$name) -> bool {
+ // Comparing the full arrays is correct because `base[size..]`
+ // is always zero.
+ self.base[..] == other.base[..]
+ }
+ }
+
+ impl crate::cmp::Eq for $name {}
+
+ impl crate::cmp::PartialOrd for $name {
+ fn partial_cmp(&self, other: &$name) -> crate::option::Option<crate::cmp::Ordering> {
+ crate::option::Option::Some(self.cmp(other))
+ }
+ }
+
+ impl crate::cmp::Ord for $name {
+ fn cmp(&self, other: &$name) -> crate::cmp::Ordering {
+ use crate::cmp::max;
+ let sz = max(self.size, other.size);
+ // Compare digits most significant first (hence `rev`); zero padding
+ // above `size` makes unequal-length comparisons work out.
+ let lhs = self.base[..sz].iter().cloned().rev();
+ let rhs = other.base[..sz].iter().cloned().rev();
+ lhs.cmp(rhs)
+ }
+ }
+
+ impl crate::clone::Clone for $name {
+ // Explicit impl (instead of derive) keeps the copy deliberate; see the
+ // type-level note about this intentionally not being `Copy`.
+ fn clone(&self) -> Self {
+ Self { size: self.size, base: self.base }
+ }
+ }
+
+ impl crate::fmt::Debug for $name {
+ fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
+ // Print at least one digit even when the bignum is zero.
+ let sz = if self.size < 1 { 1 } else { self.size };
+ // Each digit prints as BITS / 4 hex characters.
+ let digitlen = <$ty>::BITS as usize / 4;
+
+ write!(f, "{:#x}", self.base[sz - 1])?;
+ for &v in self.base[..sz - 1].iter().rev() {
+ write!(f, "_{:01$x}", v, digitlen)?;
+ }
+ crate::result::Result::Ok(())
+ }
+ }
+ };
+}
+
+/// The digit type for `Big32x40`.
+pub type Digit32 = u32;
+
+// 40 digits of 32 bits each: a 1280-bit fixed-capacity bignum.
+define_bignum!(Big32x40: type=Digit32, n=40);
+
+// This one is used for testing only.
+#[doc(hidden)]
+pub mod tests {
+ define_bignum!(Big8x3: type=u8, n=3);
+}
--- /dev/null
+//! The various algorithms from the paper.
+
+use crate::cmp::min;
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::num::dec2flt::num::{self, Big};
+use crate::num::dec2flt::rawfp::{self, fp_to_float, next_float, prev_float, RawFloat, Unpacked};
+use crate::num::dec2flt::table;
+use crate::num::diy_float::Fp;
+
+/// Number of significand bits in Fp
+const P: u32 = 64;
+
+// We simply store the best approximation for *all* exponents, so the variable "h" and the
+// associated conditions can be omitted. This trades performance for a couple kilobytes of space.
+
+/// Returns the best 64-bit `Fp` approximation of `10^e` from the precomputed table.
+///
+/// Panics if `e < table::MIN_E`; indexing panics if `e` is past the end of the
+/// table (presumably bounded by the table's maximum exponent — confirm in `table`).
+fn power_of_ten(e: i16) -> Fp {
+ assert!(e >= table::MIN_E);
+ let i = e - table::MIN_E;
+ let sig = table::POWERS.0[i as usize];
+ let exp = table::POWERS.1[i as usize];
+ Fp { f: sig, e: exp }
+}
+
+// In most architectures, floating point operations have an explicit bit size, therefore the
+// precision of the computation is determined on a per-operation basis.
+#[cfg(any(not(target_arch = "x86"), target_feature = "sse2"))]
+mod fpu_precision {
+ // No-op: on these targets each operation already rounds to its destination
+ // width, so no global FPU state needs to change.
+ pub fn set_precision<T>() {}
+}
+
+// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available.
+// The x87 FPU operates with 80 bits of precision by default, which means that operations will
+// round to 80 bits causing double rounding to happen when values are eventually represented as
+// 32/64 bit float values. To overcome this, the FPU control word can be set so that the
+// computations are performed in the desired precision.
+#[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+mod fpu_precision {
+ use crate::mem::size_of;
+
+ /// A structure used to preserve the original value of the FPU control word, so that it can be
+ /// restored when the structure is dropped.
+ ///
+ /// The x87 FPU is a 16-bits register whose fields are as follows:
+ ///
+ /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 |
+ /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:|
+ /// | | RC | PC | | PM | UM | OM | ZM | DM | IM |
+ ///
+ /// The documentation for all of the fields is available in the IA-32 Architectures Software
+ /// Developer's Manual (Volume 1).
+ ///
+ /// The only field which is relevant for the following code is PC, Precision Control. This
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// - 0b00, single precision i.e., 32-bits
+ /// - 0b10, double precision i.e., 64-bits
+ /// - 0b11, double extended precision i.e., 80-bits (default state)
+ /// The 0b01 value is reserved and should not be used.
+ pub struct FPUControlWord(u16);
+
+ /// Loads `cw` into the x87 FPU control word register via `fldcw`.
+ fn set_cw(cw: u16) {
+ // SAFETY: the `fldcw` instruction has been audited to be able to work correctly with
+ // any `u16`
+ unsafe {
+ asm!(
+ "fldcw ({})",
+ in(reg) &cw,
+ // FIXME: We are using ATT syntax to support LLVM 8 and LLVM 9.
+ options(att_syntax, nostack),
+ )
+ }
+ }
+
+ /// Sets the precision field of the FPU to `T` and returns a `FPUControlWord`.
+ pub fn set_precision<T>() -> FPUControlWord {
+ let mut cw = 0_u16;
+
+ // Compute the value for the Precision Control field that is appropriate for `T`.
+ let cw_precision = match size_of::<T>() {
+ 4 => 0x0000, // 32 bits
+ 8 => 0x0200, // 64 bits
+ _ => 0x0300, // default, 80 bits
+ };
+
+ // Get the original value of the control word to restore it later, when the
+ // `FPUControlWord` structure is dropped
+ // SAFETY: the `fnstcw` instruction has been audited to be able to work correctly with
+ // any `u16`
+ unsafe {
+ asm!(
+ "fnstcw ({})",
+ in(reg) &mut cw,
+ // FIXME: We are using ATT syntax to support LLVM 8 and LLVM 9.
+ options(att_syntax, nostack),
+ )
+ }
+
+ // Set the control word to the desired precision. This is achieved by masking away the old
+ // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above.
+ set_cw((cw & 0xFCFF) | cw_precision);
+
+ FPUControlWord(cw)
+ }
+
+ impl Drop for FPUControlWord {
+ fn drop(&mut self) {
+ // Restore the caller's original control word on scope exit.
+ set_cw(self.0)
+ }
+ }
+}
+
+/// The fast path of Bellerophon using machine-sized integers and floats.
+///
+/// This is extracted into a separate function so that it can be attempted before constructing
+/// a bignum.
+///
+/// Returns `None` when the digits or exponent are out of range for a single exact
+/// float operation, signalling the caller to fall back to the slower algorithms.
+pub fn fast_path<T: RawFloat>(integral: &[u8], fractional: &[u8], e: i64) -> Option<T> {
+ let num_digits = integral.len() + fractional.len();
+ // log_10(f64::MAX_SIG) ~ 15.95. We compare the exact value to MAX_SIG near the end,
+ // this is just a quick, cheap rejection (and also frees the rest of the code from
+ // worrying about underflow).
+ if num_digits > 16 {
+ return None;
+ }
+ // Reject exponents for which `10^e` cannot be represented exactly in `T`;
+ // the bound is expressed via log5 since `10^e = 2^e * 5^e` and only the
+ // power of five consumes significand bits.
+ if e.abs() >= T::CEIL_LOG5_OF_MAX_SIG as i64 {
+ return None;
+ }
+ let f = num::from_str_unchecked(integral.iter().chain(fractional.iter()));
+ if f > T::MAX_SIG {
+ return None;
+ }
+
+ // The fast path crucially depends on arithmetic being rounded to the correct number of bits
+ // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision
+ // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit.
+ // The `set_precision` function takes care of setting the precision on architectures which
+ // require setting it by changing the global state (like the control word of the x87 FPU).
+ let _cw = fpu_precision::set_precision::<T>();
+
+ // The case e < 0 cannot be folded into the other branch. Negative powers result in
+ // a repeating fractional part in binary, which are rounded, which causes real
+ // (and occasionally quite significant!) errors in the final result.
+ if e >= 0 {
+ Some(T::from_int(f) * T::short_fast_pow10(e as usize))
+ } else {
+ Some(T::from_int(f) / T::short_fast_pow10(e.abs() as usize))
+ }
+}
+
+/// Algorithm Bellerophon is trivial code justified by non-trivial numeric analysis.
+///
+/// It rounds ``f`` to a float with 64 bit significand and multiplies it by the best approximation
+/// of `10^e` (in the same floating point format). This is often enough to get the correct result.
+/// However, when the result is close to halfway between two adjacent (ordinary) floats, the
+/// compound rounding error from multiplying two approximation means the result may be off by a
+/// few bits. When this happens, the iterative Algorithm R fixes things up.
+///
+/// The hand-wavy "close to halfway" is made precise by the numeric analysis in the paper.
+/// In the words of Clinger:
+///
+/// > Slop, expressed in units of the least significant bit, is an inclusive bound for the error
+/// > accumulated during the floating point calculation of the approximation to f * 10^e. (Slop is
+/// > not a bound for the true error, but bounds the difference between the approximation z and
+/// > the best possible approximation that uses p bits of significand.)
+pub fn bellerophon<T: RawFloat>(f: &Big, e: i16) -> T {
+ // Slop values per case, from the paper's error analysis — confirm against Clinger.
+ let slop = if f <= &Big::from_u64(T::MAX_SIG) {
+ // The cases abs(e) < log5(2^N) are in fast_path()
+ if e >= 0 { 0 } else { 3 }
+ } else {
+ if e >= 0 { 1 } else { 4 }
+ };
+ // Approximate f * 10^e with 64-bit significands, then normalize.
+ let z = rawfp::big_to_fp(f).mul(&power_of_ten(e)).normalize();
+ // `exp_p_n` is the weight of the lowest bit kept after rounding to `T`'s
+ // significand; `lowbits` is the part of `z.f` that rounding discards.
+ let exp_p_n = 1 << (P - T::SIG_BITS as u32);
+ let lowbits: i64 = (z.f % exp_p_n) as i64;
+ // Is the slop large enough to make a difference when
+ // rounding to n bits?
+ if (lowbits - exp_p_n as i64 / 2).abs() <= slop {
+ // Too close to halfway: hand the approximation to Algorithm R for refinement.
+ algorithm_r(f, e, fp_to_float(z))
+ } else {
+ fp_to_float(z)
+ }
+}
+
+/// An iterative algorithm that improves a floating point approximation of `f * 10^e`.
+///
+/// Each iteration gets one unit in the last place closer, which of course takes terribly long to
+/// converge if `z0` is even mildly off. Luckily, when used as fallback for Bellerophon, the
+/// starting approximation is off by at most one ULP.
+fn algorithm_r<T: RawFloat>(f: &Big, e: i16, z0: T) -> T {
+ let mut z = z0;
+ loop {
+ let raw = z.unpack();
+ let (m, k) = (raw.sig, raw.k);
+ let mut x = f.clone();
+ let mut y = Big::from_u64(m);
+
+ // Find positive integers `x`, `y` such that `x / y` is exactly `(f * 10^e) / (m * 2^k)`.
+ // This not only avoids dealing with the signs of `e` and `k`, we also eliminate the
+ // power of two common to `10^e` and `2^k` to make the numbers smaller.
+ make_ratio(&mut x, &mut y, e, k);
+
+ // Split the 64-bit significand into two 32-bit bignum digits (little endian).
+ let m_digits = [(m & 0xFF_FF_FF_FF) as u32, (m >> 32) as u32];
+ // This is written a bit awkwardly because our bignums don't support
+ // negative numbers, so we use the absolute value + sign information.
+ // The multiplication with m_digits can't overflow. If `x` or `y` are large enough that
+ // we need to worry about overflow, then they are also large enough that `make_ratio` has
+ // reduced the fraction by a factor of 2^64 or more.
+ // Below, `d2` holds `2 * m * |x - y|`; the sign of `x - y` is carried
+ // separately in `d_negative`.
+ let (d2, d_negative) = if x >= y {
+ // Don't need x any more, save a clone().
+ x.sub(&y).mul_pow2(1).mul_digits(&m_digits);
+ (x, false)
+ } else {
+ // Still need y - make a copy.
+ let mut y = y.clone();
+ y.sub(&x).mul_pow2(1).mul_digits(&m_digits);
+ (y, true)
+ };
+
+ if d2 < y {
+ let mut d2_double = d2;
+ d2_double.mul_pow2(1);
+ // NOTE(review): the `m == T::MIN_SIG` special cases appear to account for
+ // the smaller gap just below the start of a binade — confirm against the paper.
+ if m == T::MIN_SIG && d_negative && d2_double > y {
+ z = prev_float(z);
+ } else {
+ return z;
+ }
+ } else if d2 == y {
+ // Exactly halfway between two floats: round to even significand.
+ if m % 2 == 0 {
+ if m == T::MIN_SIG && d_negative {
+ z = prev_float(z);
+ } else {
+ return z;
+ }
+ } else if d_negative {
+ z = prev_float(z);
+ } else {
+ z = next_float(z);
+ }
+ } else if d_negative {
+ // `x < y`: the approximation is too high by over half an ULP; step down.
+ z = prev_float(z);
+ } else {
+ z = next_float(z);
+ }
+ }
+}
+
+/// Given `x = f` and `y = m` where `f` represents input decimal digits as usual and `m` is the
+/// significand of a floating point approximation, make the ratio `x / y` equal to
+/// `(f * 10^e) / (m * 2^k)`, possibly reduced by a power of two both have in common.
+fn make_ratio(x: &mut Big, y: &mut Big, e: i16, k: i16) {
+ let (e_abs, k_abs) = (e.abs() as usize, k.abs() as usize);
+ // 10^e = 5^e * 2^e, so only the powers of two can cancel against 2^k.
+ if e >= 0 {
+ if k >= 0 {
+ // x = f * 10^e, y = m * 2^k, except that we reduce the fraction by some power of two.
+ let common = min(e_abs, k_abs);
+ x.mul_pow5(e_abs).mul_pow2(e_abs - common);
+ y.mul_pow2(k_abs - common);
+ } else {
+ // x = f * 10^e * 2^abs(k), y = m
+ // This can't overflow because it requires positive `e` and negative `k`, which can
+ // only happen for values extremely close to 1, which means that `e` and `k` will be
+ // comparatively tiny.
+ x.mul_pow5(e_abs).mul_pow2(e_abs + k_abs);
+ }
+ } else {
+ if k >= 0 {
+ // x = f, y = m * 10^abs(e) * 2^k
+ // This can't overflow either, see above.
+ y.mul_pow5(e_abs).mul_pow2(k_abs + e_abs);
+ } else {
+ // x = f * 2^abs(k), y = m * 10^abs(e), again reducing by a common power of two.
+ let common = min(e_abs, k_abs);
+ x.mul_pow2(k_abs - common);
+ y.mul_pow5(e_abs).mul_pow2(e_abs - common);
+ }
+ }
+}
+
+/// Conceptually, Algorithm M is the simplest way to convert a decimal to a float.
+///
+/// We form a ratio that is equal to `f * 10^e`, then throwing in powers of two until it gives
+/// a valid float significand. The binary exponent `k` is the number of times we multiplied
+/// numerator or denominator by two, i.e., at all times `f * 10^e` equals `(u / v) * 2^k`.
+/// When we have found out significand, we only need to round by inspecting the remainder of the
+/// division, which is done in helper functions further below.
+///
+/// This algorithm is super slow, even with the optimization described in `quick_start()`.
+/// However, it's the simplest of the algorithms to adapt for overflow, underflow, and subnormal
+/// results. This implementation takes over when Bellerophon and Algorithm R are overwhelmed.
+/// Detecting underflow and overflow is easy: The ratio still isn't an in-range significand,
+/// yet the minimum/maximum exponent has been reached. In the case of overflow, we simply return
+/// infinity.
+///
+/// Handling underflow and subnormals is trickier. One big problem is that, with the minimum
+/// exponent, the ratio might still be too large for a significand. See underflow() for details.
+pub fn algorithm_m<T: RawFloat>(f: &Big, e: i16) -> T {
+ let mut u;
+ let mut v;
+ let e_abs = e.abs() as usize;
+ let mut k = 0;
+ // Set up u / v == f * 10^e: the power of ten goes into the numerator or the
+ // denominator depending on the sign of `e`.
+ if e < 0 {
+ u = f.clone();
+ v = Big::from_small(1);
+ v.mul_pow5(e_abs).mul_pow2(e_abs);
+ } else {
+ // FIXME possible optimization: generalize big_to_fp so that we can do the equivalent of
+ // fp_to_float(big_to_fp(u)) here, only without the double rounding.
+ u = f.clone();
+ u.mul_pow5(e_abs).mul_pow2(e_abs);
+ v = Big::from_small(1);
+ }
+ quick_start::<T>(&mut u, &mut v, &mut k);
+ let mut rem = Big::from_small(0);
+ let mut x = Big::from_small(0);
+ let min_sig = Big::from_u64(T::MIN_SIG);
+ let max_sig = Big::from_u64(T::MAX_SIG);
+ // Invariant: f * 10^e == (u / v) * 2^k. Each iteration scales u or v by two
+ // until the quotient x = u / v is an in-range significand.
+ loop {
+ u.div_rem(&v, &mut x, &mut rem);
+ if k == T::MIN_EXP_INT {
+ // We have to stop at the minimum exponent, if we wait until `k < T::MIN_EXP_INT`,
+ // then we'd be off by a factor of two. Unfortunately this means we have to special-
+ // case normal numbers with the minimum exponent.
+ // FIXME find a more elegant formulation, but run the `tiny-pow10` test to make sure
+ // that it's actually correct!
+ if x >= min_sig && x <= max_sig {
+ break;
+ }
+ return underflow(x, v, rem);
+ }
+ if k > T::MAX_EXP_INT {
+ return T::INFINITY;
+ }
+ if x < min_sig {
+ u.mul_pow2(1);
+ k -= 1;
+ } else if x > max_sig {
+ v.mul_pow2(1);
+ k += 1;
+ } else {
+ break;
+ }
+ }
+ let q = num::to_u64(&x);
+ let z = rawfp::encode_normal(Unpacked::new(q, k));
+ // The discarded remainder decides the final rounding.
+ round_by_remainder(v, rem, q, z)
+}
+
+/// Skips over most Algorithm M iterations by checking the bit length.
+fn quick_start<T: RawFloat>(u: &mut Big, v: &mut Big, k: &mut i16) {
+ // The bit length is an estimate of the base two logarithm, and log(u / v) = log(u) - log(v).
+ // The estimate is off by at most 1, but always an under-estimate, so the error on log(u)
+ // and log(v) are of the same sign and cancel out (if both are large). Therefore the error
+ // for log(u / v) is at most one as well.
+ // The target ratio is one where u/v is in an in-range significand. Thus our termination
+ // condition is log2(u / v) being the significand bits, plus/minus one.
+ // FIXME Looking at the second bit could improve the estimate and avoid some more divisions.
+ let target_ratio = T::SIG_BITS as i16;
+ let log2_u = u.bit_length() as i16;
+ let log2_v = v.bit_length() as i16;
+ // Accumulate the shifts first and apply them in one pass at the end.
+ let mut u_shift: i16 = 0;
+ let mut v_shift: i16 = 0;
+ assert!(*k == 0);
+ loop {
+ if *k == T::MIN_EXP_INT {
+ // Underflow or subnormal. Leave it to the main function.
+ break;
+ }
+ if *k == T::MAX_EXP_INT {
+ // Overflow. Leave it to the main function.
+ break;
+ }
+ let log2_ratio = (log2_u + u_shift) - (log2_v + v_shift);
+ if log2_ratio < target_ratio - 1 {
+ u_shift += 1;
+ *k -= 1;
+ } else if log2_ratio > target_ratio + 1 {
+ v_shift += 1;
+ *k += 1;
+ } else {
+ break;
+ }
+ }
+ // The shifts start at 0 and only ever increase, so these casts are lossless.
+ u.mul_pow2(u_shift as usize);
+ v.mul_pow2(v_shift as usize);
+}
+
+/// Handles the Algorithm M case where the minimum exponent was reached: encodes a
+/// subnormal when `x` fits below the normal significand range, otherwise rounds off
+/// the excess bits of `x` and encodes a minimum-exponent normal number.
+fn underflow<T: RawFloat>(x: Big, v: Big, rem: Big) -> T {
+ if x < Big::from_u64(T::MIN_SIG) {
+ // Subnormal: the quotient already fits, round by the division remainder.
+ let q = num::to_u64(&x);
+ let z = rawfp::encode_subnormal(q);
+ return round_by_remainder(v, rem, q, z);
+ }
+ // Ratio isn't an in-range significand with the minimum exponent, so we need to round off
+ // excess bits and adjust the exponent accordingly. The real value now looks like this:
+ //
+ // x lsb
+ // /--------------\/
+ // 1010101010101010.10101010101010 * 2^k
+ // \-----/\-------/ \------------/
+ // q trunc. (represented by rem)
+ //
+ // Therefore, when the rounded-off bits are != 0.5 ULP, they decide the rounding
+ // on their own. When they are equal and the remainder is non-zero, the value still
+ // needs to be rounded up. Only when the rounded off bits are 1/2 and the remainder
+ // is zero, we have a half-to-even situation.
+ let bits = x.bit_length();
+ let lsb = bits - T::SIG_BITS as usize;
+ let q = num::get_bits(&x, lsb, bits);
+ let k = T::MIN_EXP_INT + lsb as i16;
+ let z = rawfp::encode_normal(Unpacked::new(q, k));
+ let q_even = q % 2 == 0;
+ match num::compare_with_half_ulp(&x, lsb) {
+ Greater => next_float(z),
+ Less => z,
+ Equal if rem.is_zero() && q_even => z,
+ Equal => next_float(z),
+ }
+}
+
+/// Ordinary round-to-even, obfuscated by having to round based on the remainder of a division.
+///
+/// `r` is the remainder of the division that produced `q`; comparing it against
+/// `v - r` tells us on which side of the halfway point the true value lies.
+fn round_by_remainder<T: RawFloat>(v: Big, r: Big, q: u64, z: T) -> T {
+ // Compute `v - r` in place; this plays the role of the halfway threshold.
+ let mut half = v;
+ half.sub(&r);
+ if r == half {
+ // Exactly halfway: break the tie towards the even significand.
+ if q % 2 == 0 { z } else { next_float(z) }
+ } else if r < half {
+ // Below halfway: keep z.
+ z
+ } else {
+ // Above halfway: round up to the next float.
+ next_float(z)
+ }
+}
--- /dev/null
+//! Converting decimal strings into IEEE 754 binary floating point numbers.
+//!
+//! # Problem statement
+//!
+//! We are given a decimal string such as `12.34e56`. This string consists of integral (`12`),
+//! fractional (`34`), and exponent (`56`) parts. All parts are optional and interpreted as zero
+//! when missing.
+//!
+//! We seek the IEEE 754 floating point number that is closest to the exact value of the decimal
+//! string. It is well-known that many decimal strings do not have terminating representations in
+//! base two, so we round to 0.5 units in the last place (in other words, as well as possible).
+//! Ties, decimal values exactly half-way between two consecutive floats, are resolved with the
+//! half-to-even strategy, also known as banker's rounding.
+//!
+//! Needless to say, this is quite hard, both in terms of implementation complexity and in terms
+//! of CPU cycles taken.
+//!
+//! # Implementation
+//!
+//! First, we ignore signs. Or rather, we remove it at the very beginning of the conversion
+//! process and re-apply it at the very end. This is correct in all edge cases since IEEE
+//! floats are symmetric around zero, negating one simply flips the first bit.
+//!
+//! Then we remove the decimal point by adjusting the exponent: Conceptually, `12.34e56` turns
+//! into `1234e54`, which we describe with a positive integer `f = 1234` and an integer `e = 54`.
+//! The `(f, e)` representation is used by almost all code past the parsing stage.
+//!
+//! We then try a long chain of progressively more general and expensive special cases using
+//! machine-sized integers and small, fixed-sized floating point numbers (first `f32`/`f64`, then
+//! a type with 64 bit significand, `Fp`). When all these fail, we bite the bullet and resort to a
+//! simple but very slow algorithm that involves computing `f * 10^e` fully and doing an iterative
+//! search for the best approximation.
+//!
+//! Primarily, this module and its children implement the algorithms described in:
+//! "How to Read Floating Point Numbers Accurately" by William D. Clinger,
+//! available online: <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.4152>
+//!
+//! In addition, there are numerous helper functions that are used in the paper but not available
+//! in Rust (or at least in core). Our version is additionally complicated by the need to handle
+//! overflow and underflow and the desire to handle subnormal numbers. Bellerophon and
+//! Algorithm R have trouble with overflow, subnormals, and underflow. We conservatively switch to
+//! Algorithm M (with the modifications described in section 8 of the paper) well before the
+//! inputs get into the critical region.
+//!
+//! Another aspect that needs attention is the ``RawFloat`` trait by which almost all functions
+//! are parametrized. One might think that it's enough to parse to `f64` and cast the result to
+//! `f32`. Unfortunately this is not the world we live in, and this has nothing to do with using
+//! base two or half-to-even rounding.
+//!
+//! Consider for example two types `d2` and `d4` representing a decimal type with two decimal
+//! digits and four decimal digits each and take "0.01499" as input. Let's use half-up rounding.
+//! Going directly to two decimal digits gives `0.01`, but if we round to four digits first,
+//! we get `0.0150`, which is then rounded up to `0.02`. The same principle applies to other
+//! operations as well, if you want 0.5 ULP accuracy you need to do *everything* in full precision
+//! and round *exactly once, at the end*, by considering all truncated bits at once.
+//!
+//! FIXME: Although some code duplication is necessary, perhaps parts of the code could be shuffled
+//! around such that less code is duplicated. Large parts of the algorithms are independent of the
+//! float type to output, or only needs access to a few constants, which could be passed in as
+//! parameters.
+//!
+//! # Other
+//!
+//! The conversion should *never* panic. There are assertions and explicit panics in the code,
+//! but they should never be triggered and only serve as internal sanity checks. Any panics should
+//! be considered a bug.
+//!
+//! There are unit tests but they are woefully inadequate at ensuring correctness, they only cover
+//! a small percentage of possible errors. Far more extensive tests are located in the directory
+//! `src/etc/test-float-parse` as a Python script.
+//!
+//! A note on integer overflow: Many parts of this file perform arithmetic with the decimal
+//! exponent `e`. Primarily, we shift the decimal point around: Before the first decimal digit,
+//! after the last decimal digit, and so on. This could overflow if done carelessly. We rely on
+//! the parsing submodule to only hand out sufficiently small exponents, where "sufficient" means
+//! "such that the exponent +/- the number of decimal digits fits into a 64 bit integer".
+//! Larger exponents are accepted, but we don't do arithmetic with them, they are immediately
+//! turned into {positive,negative} {zero,infinity}.
+
+#![doc(hidden)]
+#![unstable(
+ feature = "dec2flt",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+use crate::fmt;
+use crate::str::FromStr;
+
+use self::num::digits_to_big;
+use self::parse::{parse_decimal, Decimal, ParseResult, Sign};
+use self::rawfp::RawFloat;
+
+mod algorithm;
+mod num;
+mod table;
+// These two have their own tests.
+pub mod parse;
+pub mod rawfp;
+
+// Implements `FromStr` for a float type by delegating to the shared `dec2flt`
+// entry point; kept as a macro so `f32` and `f64` share one documented impl.
+macro_rules! from_str_float_impl {
+ ($t:ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl FromStr for $t {
+ type Err = ParseFloatError;
+
+ /// Converts a string in base 10 to a float.
+ /// Accepts an optional decimal exponent.
+ ///
+ /// This function accepts strings such as
+ ///
+ /// * '3.14'
+ /// * '-3.14'
+ /// * '2.5E10', or equivalently, '2.5e10'
+ /// * '2.5E-10'
+ /// * '5.'
+ /// * '.5', or, equivalently, '0.5'
+ /// * 'inf', '-inf', 'NaN'
+ ///
+ /// Leading and trailing whitespace represent an error.
+ ///
+ /// # Grammar
+ ///
+ /// All strings that adhere to the following [EBNF] grammar
+ /// will result in an [`Ok`] being returned:
+ ///
+ /// ```txt
+ /// Float ::= Sign? ( 'inf' | 'NaN' | Number )
+ /// Number ::= ( Digit+ |
+ /// Digit+ '.' Digit* |
+ /// Digit* '.' Digit+ ) Exp?
+ /// Exp ::= [eE] Sign? Digit+
+ /// Sign ::= [+-]
+ /// Digit ::= [0-9]
+ /// ```
+ ///
+ /// [EBNF]: https://www.w3.org/TR/REC-xml/#sec-notation
+ ///
+ /// # Known bugs
+ ///
+ /// In some situations, some strings that should create a valid float
+ /// instead return an error. See [issue #31407] for details.
+ ///
+ /// [issue #31407]: https://github.com/rust-lang/rust/issues/31407
+ ///
+ /// # Arguments
+ ///
+ /// * src - A string
+ ///
+ /// # Return value
+ ///
+ /// `Err(ParseFloatError)` if the string did not represent a valid
+ /// number. Otherwise, `Ok(n)` where `n` is the floating-point
+ /// number represented by `src`.
+ #[inline]
+ fn from_str(src: &str) -> Result<Self, ParseFloatError> {
+ dec2flt(src)
+ }
+ }
+ };
+}
+from_str_float_impl!(f32);
+from_str_float_impl!(f64);
+
/// An error which can be returned when parsing a float.
///
/// This error is used as the error type for the [`FromStr`] implementation
/// for [`f32`] and [`f64`].
///
/// # Example
///
/// ```
/// use std::str::FromStr;
///
/// if let Err(e) = f64::from_str("a.12") {
///     println!("Failed conversion to f64: {}", e);
/// }
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ParseFloatError {
    // Kept private so the set of error causes can evolve without touching the stable API.
    kind: FloatErrorKind,
}
+
/// The (private) cause of a `ParseFloatError`.
#[derive(Debug, Clone, PartialEq, Eq)]
enum FloatErrorKind {
    /// The input string was empty.
    Empty,
    /// The input was not a syntactically valid float literal.
    Invalid,
}
+
impl ParseFloatError {
    /// Returns a static description of the error, used by the `Error` trait
    /// and the `Display` impl below. Not public API.
    #[unstable(
        feature = "int_error_internals",
        reason = "available through Error trait and this method should \
                  not be exposed publicly",
        issue = "none"
    )]
    #[doc(hidden)]
    pub fn __description(&self) -> &str {
        match self.kind {
            FloatErrorKind::Empty => "cannot parse float from empty string",
            FloatErrorKind::Invalid => "invalid float literal",
        }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for ParseFloatError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the `str` Display impl so formatter flags (width, etc.) still apply.
        self.__description().fmt(f)
    }
}
+
/// Shorthand constructor for the "empty input" error.
fn pfe_empty() -> ParseFloatError {
    ParseFloatError { kind: FloatErrorKind::Empty }
}

/// Shorthand constructor for the "invalid float literal" error.
fn pfe_invalid() -> ParseFloatError {
    ParseFloatError { kind: FloatErrorKind::Invalid }
}
+
+/// Splits a decimal string into sign and the rest, without inspecting or validating the rest.
+fn extract_sign(s: &str) -> (Sign, &str) {
+ match s.as_bytes()[0] {
+ b'+' => (Sign::Positive, &s[1..]),
+ b'-' => (Sign::Negative, &s[1..]),
+ // If the string is invalid, we never use the sign, so we don't need to validate here.
+ _ => (Sign::Positive, s),
+ }
+}
+
/// Converts a decimal string into a floating point number.
///
/// This is the driver for the whole conversion: it strips the sign, delegates validation and
/// decomposition to the parsing submodule, handles the `inf`/`NaN` literals and the parser's
/// overflow/underflow shortcuts, and finally applies the sign to the converted magnitude.
fn dec2flt<T: RawFloat>(s: &str) -> Result<T, ParseFloatError> {
    if s.is_empty() {
        return Err(pfe_empty());
    }
    let (sign, s) = extract_sign(s);
    let flt = match parse_decimal(s) {
        ParseResult::Valid(decimal) => convert(decimal)?,
        // Exponents too huge to do arithmetic with: the parser already decided the result.
        ParseResult::ShortcutToInf => T::INFINITY,
        ParseResult::ShortcutToZero => T::ZERO,
        ParseResult::Invalid => match s {
            // Only these exact (case-sensitive) spellings are accepted.
            "inf" => T::INFINITY,
            "NaN" => T::NAN,
            _ => {
                return Err(pfe_invalid());
            }
        },
    };

    // The conversion above worked on the magnitude only; apply the sign last.
    match sign {
        Sign::Positive => Ok(flt),
        Sign::Negative => Ok(-flt),
    }
}
+
/// The main workhorse for the decimal-to-float conversion: Orchestrate all the preprocessing
/// and figure out which algorithm should do the actual conversion.
fn convert<T: RawFloat>(mut decimal: Decimal<'_>) -> Result<T, ParseFloatError> {
    simplify(&mut decimal);
    // Obvious overflow/underflow is decided without looking at the digits at all.
    if let Some(x) = trivial_cases(&decimal) {
        return Ok(x);
    }
    // Remove/shift out the decimal point.
    let e = decimal.exp - decimal.fractional.len() as i64;
    if let Some(x) = algorithm::fast_path(decimal.integral, decimal.fractional, e) {
        return Ok(x);
    }
    // Big32x40 is limited to 1280 bits, which translates to about 385 decimal digits.
    // If we exceed this, we'll crash, so we error out before getting too close (within 10^10).
    let upper_bound = bound_intermediate_digits(&decimal, e);
    if upper_bound > 375 {
        return Err(pfe_invalid());
    }
    let f = digits_to_big(decimal.integral, decimal.fractional);

    // Now the exponent certainly fits in 16 bit, which is used throughout the main algorithms.
    let e = e as i16;
    // FIXME These bounds are rather conservative. A more careful analysis of the failure modes
    // of Bellerophon could allow using it in more cases for a massive speed up.
    let exponent_in_range = table::MIN_E <= e && e <= table::MAX_E;
    let value_in_range = upper_bound <= T::MAX_NORMAL_DIGITS as u64;
    if exponent_in_range && value_in_range {
        // Within these conservative bounds the faster Bellerophon algorithm is correct.
        Ok(algorithm::bellerophon(&f, e))
    } else {
        // Fall back to the universally-correct (but slow) Algorithm M.
        Ok(algorithm::algorithm_m(&f, e))
    }
}
+
+// As written, this optimizes badly (see #27130, though it refers to an old version of the code).
+// `inline(always)` is a workaround for that. There are only two call sites overall and it doesn't
+// make code size worse.
+
+/// Strip zeros where possible, even when this requires changing the exponent
+#[inline(always)]
+fn simplify(decimal: &mut Decimal<'_>) {
+ let is_zero = &|&&d: &&u8| -> bool { d == b'0' };
+ // Trimming these zeros does not change anything but may enable the fast path (< 15 digits).
+ let leading_zeros = decimal.integral.iter().take_while(is_zero).count();
+ decimal.integral = &decimal.integral[leading_zeros..];
+ let trailing_zeros = decimal.fractional.iter().rev().take_while(is_zero).count();
+ let end = decimal.fractional.len() - trailing_zeros;
+ decimal.fractional = &decimal.fractional[..end];
+ // Simplify numbers of the form 0.0...x and x...0.0, adjusting the exponent accordingly.
+ // This may not always be a win (possibly pushes some numbers out of the fast path), but it
+ // simplifies other parts significantly (notably, approximating the magnitude of the value).
+ if decimal.integral.is_empty() {
+ let leading_zeros = decimal.fractional.iter().take_while(is_zero).count();
+ decimal.fractional = &decimal.fractional[leading_zeros..];
+ decimal.exp -= leading_zeros as i64;
+ } else if decimal.fractional.is_empty() {
+ let trailing_zeros = decimal.integral.iter().rev().take_while(is_zero).count();
+ let end = decimal.integral.len() - trailing_zeros;
+ decimal.integral = &decimal.integral[..end];
+ decimal.exp += trailing_zeros as i64;
+ }
+}
+
+/// Returns a quick-an-dirty upper bound on the size (log10) of the largest value that Algorithm R
+/// and Algorithm M will compute while working on the given decimal.
+fn bound_intermediate_digits(decimal: &Decimal<'_>, e: i64) -> u64 {
+ // We don't need to worry too much about overflow here thanks to trivial_cases() and the
+ // parser, which filter out the most extreme inputs for us.
+ let f_len: u64 = decimal.integral.len() as u64 + decimal.fractional.len() as u64;
+ if e >= 0 {
+ // In the case e >= 0, both algorithms compute about `f * 10^e`. Algorithm R proceeds to
+ // do some complicated calculations with this but we can ignore that for the upper bound
+ // because it also reduces the fraction beforehand, so we have plenty of buffer there.
+ f_len + (e as u64)
+ } else {
+ // If e < 0, Algorithm R does roughly the same thing, but Algorithm M differs:
+ // It tries to find a positive number k such that `f << k / 10^e` is an in-range
+ // significand. This will result in about `2^53 * f * 10^e` < `10^17 * f * 10^e`.
+ // One input that triggers this is 0.33...33 (375 x 3).
+ f_len + (e.abs() as u64) + 17
+ }
+}
+
+/// Detects obvious overflows and underflows without even looking at the decimal digits.
+fn trivial_cases<T: RawFloat>(decimal: &Decimal<'_>) -> Option<T> {
+ // There were zeros but they were stripped by simplify()
+ if decimal.integral.is_empty() && decimal.fractional.is_empty() {
+ return Some(T::ZERO);
+ }
+ // This is a crude approximation of ceil(log10(the real value)). We don't need to worry too
+ // much about overflow here because the input length is tiny (at least compared to 2^64) and
+ // the parser already handles exponents whose absolute value is greater than 10^18
+ // (which is still 10^19 short of 2^64).
+ let max_place = decimal.exp + decimal.integral.len() as i64;
+ if max_place > T::INF_CUTOFF {
+ return Some(T::INFINITY);
+ } else if max_place < T::ZERO_CUTOFF {
+ return Some(T::ZERO);
+ }
+ None
+}
--- /dev/null
+//! Utility functions for bignums that don't make too much sense to turn into methods.
+
+// FIXME This module's name is a bit unfortunate, since other modules also import `core::num`.
+
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+
+pub use crate::num::bignum::Big32x40 as Big;
+
+/// Test whether truncating all bits less significant than `ones_place` introduces
+/// a relative error less, equal, or greater than 0.5 ULP.
+pub fn compare_with_half_ulp(f: &Big, ones_place: usize) -> Ordering {
+ if ones_place == 0 {
+ return Less;
+ }
+ let half_bit = ones_place - 1;
+ if f.get_bit(half_bit) == 0 {
+ // < 0.5 ULP
+ return Less;
+ }
+ // If all remaining bits are zero, it's = 0.5 ULP, otherwise > 0.5
+ // If there are no more bits (half_bit == 0), the below also correctly returns Equal.
+ for i in 0..half_bit {
+ if f.get_bit(i) == 1 {
+ return Greater;
+ }
+ }
+ Equal
+}
+
/// Converts an ASCII string containing only decimal digits to a `u64`.
///
/// Does not perform checks for overflow or invalid characters, so if the caller is not careful,
/// the result is bogus and can panic (though it won't be `unsafe`). Additionally, empty strings
/// are treated as zero. This function exists because
///
/// 1. using `FromStr` on `&[u8]` requires `from_utf8_unchecked`, which is bad, and
/// 2. piecing together the results of `integral.parse()` and `fractional.parse()` is
///    more complicated than this entire function.
pub fn from_str_unchecked<'a, T>(bytes: T) -> u64
where
    T: IntoIterator<Item = &'a u8>,
{
    // Classic Horner-style accumulation in base 10.
    bytes.into_iter().fold(0u64, |acc, &c| acc * 10 + (c - b'0') as u64)
}
+
+/// Converts a string of ASCII digits into a bignum.
+///
+/// Like `from_str_unchecked`, this function relies on the parser to weed out non-digits.
+pub fn digits_to_big(integral: &[u8], fractional: &[u8]) -> Big {
+ let mut f = Big::from_small(0);
+ for &c in integral.iter().chain(fractional) {
+ let n = (c - b'0') as u32;
+ f.mul_small(10);
+ f.add_small(n);
+ }
+ f
+}
+
+/// Unwraps a bignum into a 64 bit integer. Panics if the number is too large.
+pub fn to_u64(x: &Big) -> u64 {
+ assert!(x.bit_length() < 64);
+ let d = x.digits();
+ if d.len() < 2 { d[0] as u64 } else { (d[1] as u64) << 32 | d[0] as u64 }
+}
+
+/// Extracts a range of bits.
+
+/// Index 0 is the least significant bit and the range is half-open as usual.
+/// Panics if asked to extract more bits than fit into the return type.
+pub fn get_bits(x: &Big, start: usize, end: usize) -> u64 {
+ assert!(end - start <= 64);
+ let mut result: u64 = 0;
+ for i in (start..end).rev() {
+ result = result << 1 | x.get_bit(i) as u64;
+ }
+ result
+}
--- /dev/null
+//! Validating and decomposing a decimal string of the form:
+//!
+//! `(digits | digits? '.'? digits?) (('e' | 'E') ('+' | '-')? digits)?`
+//!
+//! In other words, standard floating-point syntax, with two exceptions: No sign, and no
+//! handling of "inf" and "NaN". These are handled by the driver function (super::dec2flt).
+//!
+//! Although recognizing valid inputs is relatively easy, this module also has to reject the
+//! countless invalid variations, never panic, and perform numerous checks that the other
+//! modules rely on to not panic (or overflow) in turn.
+//! To make matters worse, all that happens in a single pass over the input.
+//! So, be careful when modifying anything, and double-check with the other modules.
+use self::ParseResult::{Invalid, ShortcutToInf, ShortcutToZero, Valid};
+use super::num;
+
/// The sign of a number (or of an exponent), as written in the input.
#[derive(Debug)]
pub enum Sign {
    Positive,
    Negative,
}
+
#[derive(Debug, PartialEq, Eq)]
/// The interesting parts of a decimal string.
pub struct Decimal<'a> {
    /// Digits before the decimal point.
    pub integral: &'a [u8],
    /// Digits after the decimal point.
    pub fractional: &'a [u8],
    /// The decimal exponent, guaranteed to have fewer than 18 decimal digits.
    pub exp: i64,
}

impl<'a> Decimal<'a> {
    pub fn new(integral: &'a [u8], fractional: &'a [u8], exp: i64) -> Decimal<'a> {
        Decimal { integral, fractional, exp }
    }
}
+
/// The outcome of validating and decomposing a decimal string.
#[derive(Debug, PartialEq, Eq)]
pub enum ParseResult<'a> {
    /// The input is syntactically valid; here are its parts.
    Valid(Decimal<'a>),
    /// The exponent is so large that the value is certainly infinity;
    /// no arithmetic with it is attempted.
    ShortcutToInf,
    /// The exponent is so small that the value is certainly zero.
    ShortcutToZero,
    /// Not a syntactically valid number. Signs, "inf" and "NaN" are the caller's business.
    Invalid,
}
+
+/// Checks if the input string is a valid floating point number and if so, locate the integral
+/// part, the fractional part, and the exponent in it. Does not handle signs.
+pub fn parse_decimal(s: &str) -> ParseResult<'_> {
+ if s.is_empty() {
+ return Invalid;
+ }
+
+ let s = s.as_bytes();
+ let (integral, s) = eat_digits(s);
+
+ match s.first() {
+ None => Valid(Decimal::new(integral, b"", 0)),
+ Some(&b'e' | &b'E') => {
+ if integral.is_empty() {
+ return Invalid; // No digits before 'e'
+ }
+
+ parse_exp(integral, b"", &s[1..])
+ }
+ Some(&b'.') => {
+ let (fractional, s) = eat_digits(&s[1..]);
+ if integral.is_empty() && fractional.is_empty() {
+ // We require at least a single digit before or after the point.
+ return Invalid;
+ }
+
+ match s.first() {
+ None => Valid(Decimal::new(integral, fractional, 0)),
+ Some(&b'e' | &b'E') => parse_exp(integral, fractional, &s[1..]),
+ _ => Invalid, // Trailing junk after fractional part
+ }
+ }
+ _ => Invalid, // Trailing junk after first digit string
+ }
+}
+
/// Carves off decimal digits up to the first non-digit character.
fn eat_digits(s: &[u8]) -> (&[u8], &[u8]) {
    let split = s.iter().position(|b| !b.is_ascii_digit()).unwrap_or(s.len());
    s.split_at(split)
}
+
+/// Exponent extraction and error checking.
+fn parse_exp<'a>(integral: &'a [u8], fractional: &'a [u8], rest: &'a [u8]) -> ParseResult<'a> {
+ let (sign, rest) = match rest.first() {
+ Some(&b'-') => (Sign::Negative, &rest[1..]),
+ Some(&b'+') => (Sign::Positive, &rest[1..]),
+ _ => (Sign::Positive, rest),
+ };
+ let (mut number, trailing) = eat_digits(rest);
+ if !trailing.is_empty() {
+ return Invalid; // Trailing junk after exponent
+ }
+ if number.is_empty() {
+ return Invalid; // Empty exponent
+ }
+ // At this point, we certainly have a valid string of digits. It may be too long to put into
+ // an `i64`, but if it's that huge, the input is certainly zero or infinity. Since each zero
+ // in the decimal digits only adjusts the exponent by +/- 1, at exp = 10^18 the input would
+ // have to be 17 exabyte (!) of zeros to get even remotely close to being finite.
+ // This is not exactly a use case we need to cater to.
+ while number.first() == Some(&b'0') {
+ number = &number[1..];
+ }
+ if number.len() >= 18 {
+ return match sign {
+ Sign::Positive => ShortcutToInf,
+ Sign::Negative => ShortcutToZero,
+ };
+ }
+ let abs_exp = num::from_str_unchecked(number);
+ let e = match sign {
+ Sign::Positive => abs_exp as i64,
+ Sign::Negative => -(abs_exp as i64),
+ };
+ Valid(Decimal::new(integral, fractional, e))
+}
--- /dev/null
+//! Bit fiddling on positive IEEE 754 floats. Negative numbers aren't and needn't be handled.
+//! Normal floating point numbers have a canonical representation as (frac, exp) such that the
+//! value is 2<sup>exp</sup> * (1 + sum(frac[N-i] / 2<sup>i</sup>)) where N is the number of bits.
+//! Subnormals are slightly different and weird, but the same principle applies.
+//!
//! Here, however, we represent them as (sig, k) with sig positive, such that the value is sig *
//! 2<sup>k</sup>. Besides making the "hidden bit" explicit, this changes the exponent by the
//! so-called mantissa shift.
+//!
+//! Put another way, normally floats are written as (1) but here they are written as (2):
+//!
+//! 1. `1.101100...11 * 2^m`
+//! 2. `1101100...11 * 2^n`
+//!
+//! We call (1) the **fractional representation** and (2) the **integral representation**.
+//!
+//! Many functions in this module only handle normal numbers. The dec2flt routines conservatively
+//! take the universally-correct slow path (Algorithm M) for very small and very large numbers.
+//! That algorithm needs only next_float() which does handle subnormals and zeros.
+use crate::cmp::Ordering::{Equal, Greater, Less};
+use crate::convert::{TryFrom, TryInto};
+use crate::fmt::{Debug, LowerExp};
+use crate::num::dec2flt::num::{self, Big};
+use crate::num::dec2flt::table;
+use crate::num::diy_float::Fp;
+use crate::num::FpCategory;
+use crate::num::FpCategory::{Infinite, Nan, Normal, Subnormal, Zero};
+use crate::ops::{Add, Div, Mul, Neg};
+
/// A float decomposed into its integral significand and binary exponent
/// (the "integral representation" described in the module docs).
#[derive(Copy, Clone, Debug)]
pub struct Unpacked {
    /// The significand, with the otherwise-hidden leading bit made explicit.
    pub sig: u64,
    /// The binary exponent for the integral representation.
    pub k: i16,
}

impl Unpacked {
    pub fn new(sig: u64, k: i16) -> Self {
        Unpacked { sig, k }
    }
}
+
/// A helper trait to avoid duplicating basically all the conversion code for `f32` and `f64`.
///
/// See the parent module's doc comment for why this is necessary.
///
/// Should **never ever** be implemented for other types or be used outside the dec2flt module.
pub trait RawFloat:
    Copy + Debug + LowerExp + Mul<Output = Self> + Div<Output = Self> + Neg<Output = Self>
{
    const INFINITY: Self;
    const NAN: Self;
    const ZERO: Self;

    /// Type used by `to_bits` and `from_bits`.
    type Bits: Add<Output = Self::Bits> + From<u8> + TryFrom<u64>;

    /// Performs a raw transmutation to an integer.
    fn to_bits(self) -> Self::Bits;

    /// Performs a raw transmutation from an integer.
    fn from_bits(v: Self::Bits) -> Self;

    /// Returns the category that this number falls into.
    fn classify(self) -> FpCategory;

    /// Returns the mantissa, exponent and sign as integers.
    fn integer_decode(self) -> (u64, i16, i8);

    /// Decodes the float.
    fn unpack(self) -> Unpacked;

    /// Casts from a small integer that can be represented exactly. Panic if the integer can't be
    /// represented, the other code in this module makes sure to never let that happen.
    fn from_int(x: u64) -> Self;

    /// Gets the value 10<sup>e</sup> from a pre-computed table.
    /// Panics for `e >= CEIL_LOG5_OF_MAX_SIG`.
    fn short_fast_pow10(e: usize) -> Self;

    /// What the name says. It's easier to hard code than juggling intrinsics and
    /// hoping LLVM constant folds it.
    const CEIL_LOG5_OF_MAX_SIG: i16;

    /// A conservative bound on the decimal digits of inputs that can't produce overflow or zero or
    /// subnormals. Probably the decimal exponent of the maximum normal value, hence the name.
    const MAX_NORMAL_DIGITS: usize;

    /// When the most significant decimal digit has a place value greater than this, the number
    /// is certainly rounded to infinity.
    const INF_CUTOFF: i64;

    /// When the most significant decimal digit has a place value less than this, the number
    /// is certainly rounded to zero.
    const ZERO_CUTOFF: i64;

    /// The number of bits in the exponent.
    const EXP_BITS: u8;

    /// The number of bits in the significand, *including* the hidden bit.
    const SIG_BITS: u8;

    /// The number of bits in the significand, *excluding* the hidden bit.
    const EXPLICIT_SIG_BITS: u8;

    /// The maximum legal exponent in fractional representation.
    const MAX_EXP: i16;

    /// The minimum legal exponent in fractional representation, excluding subnormals.
    const MIN_EXP: i16;

    /// `MAX_EXP` for integral representation, i.e., with the shift applied.
    const MAX_EXP_INT: i16;

    /// `MAX_EXP` encoded (i.e., with offset bias)
    const MAX_ENCODED_EXP: i16;

    /// `MIN_EXP` for integral representation, i.e., with the shift applied.
    const MIN_EXP_INT: i16;

    /// The maximum normalized significand in integral representation.
    const MAX_SIG: u64;

    /// The minimal normalized significand in integral representation.
    const MIN_SIG: u64;
}
+
// Mostly a workaround for #34344.
// Derives the remaining `RawFloat` constants from `SIG_BITS` / `EXP_BITS`, so the
// per-type impls only need to spell out the handful of values that differ.
macro_rules! other_constants {
    ($type: ident) => {
        // The hidden leading bit is not stored, hence "- 1".
        const EXPLICIT_SIG_BITS: u8 = Self::SIG_BITS - 1;
        const MAX_EXP: i16 = (1 << (Self::EXP_BITS - 1)) - 1;
        const MIN_EXP: i16 = -<Self as RawFloat>::MAX_EXP + 1;
        const MAX_EXP_INT: i16 = <Self as RawFloat>::MAX_EXP - (Self::SIG_BITS as i16 - 1);
        const MAX_ENCODED_EXP: i16 = (1 << Self::EXP_BITS) - 1;
        const MIN_EXP_INT: i16 = <Self as RawFloat>::MIN_EXP - (Self::SIG_BITS as i16 - 1);
        const MAX_SIG: u64 = (1 << Self::SIG_BITS) - 1;
        const MIN_SIG: u64 = 1 << (Self::SIG_BITS - 1);

        const INFINITY: Self = $type::INFINITY;
        const NAN: Self = $type::NAN;
        const ZERO: Self = 0.0;
    };
}
+
+impl RawFloat for f32 {
+ type Bits = u32;
+
+ const SIG_BITS: u8 = 24;
+ const EXP_BITS: u8 = 8;
+ const CEIL_LOG5_OF_MAX_SIG: i16 = 11;
+ const MAX_NORMAL_DIGITS: usize = 35;
+ const INF_CUTOFF: i64 = 40;
+ const ZERO_CUTOFF: i64 = -48;
+ other_constants!(f32);
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits = self.to_bits();
+ let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
+ let mantissa =
+ if exponent == 0 { (bits & 0x7fffff) << 1 } else { (bits & 0x7fffff) | 0x800000 };
+ // Exponent bias + mantissa shift
+ exponent -= 127 + 23;
+ (mantissa as u64, exponent, sign)
+ }
+
+ fn unpack(self) -> Unpacked {
+ let (sig, exp, _sig) = self.integer_decode();
+ Unpacked::new(sig, exp)
+ }
+
+ fn from_int(x: u64) -> f32 {
+ // rkruppe is uncertain whether `as` rounds correctly on all platforms.
+ debug_assert!(x as f32 == fp_to_float(Fp { f: x, e: 0 }));
+ x as f32
+ }
+
+ fn short_fast_pow10(e: usize) -> Self {
+ table::F32_SHORT_POWERS[e]
+ }
+
+ fn classify(self) -> FpCategory {
+ self.classify()
+ }
+ fn to_bits(self) -> Self::Bits {
+ self.to_bits()
+ }
+ fn from_bits(v: Self::Bits) -> Self {
+ Self::from_bits(v)
+ }
+}
+
+impl RawFloat for f64 {
+ type Bits = u64;
+
+ const SIG_BITS: u8 = 53;
+ const EXP_BITS: u8 = 11;
+ const CEIL_LOG5_OF_MAX_SIG: i16 = 23;
+ const MAX_NORMAL_DIGITS: usize = 305;
+ const INF_CUTOFF: i64 = 310;
+ const ZERO_CUTOFF: i64 = -326;
+ other_constants!(f64);
+
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits = self.to_bits();
+ let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+ }
+
+ fn unpack(self) -> Unpacked {
+ let (sig, exp, _sig) = self.integer_decode();
+ Unpacked::new(sig, exp)
+ }
+
+ fn from_int(x: u64) -> f64 {
+ // rkruppe is uncertain whether `as` rounds correctly on all platforms.
+ debug_assert!(x as f64 == fp_to_float(Fp { f: x, e: 0 }));
+ x as f64
+ }
+
+ fn short_fast_pow10(e: usize) -> Self {
+ table::F64_SHORT_POWERS[e]
+ }
+
+ fn classify(self) -> FpCategory {
+ self.classify()
+ }
+ fn to_bits(self) -> Self::Bits {
+ self.to_bits()
+ }
+ fn from_bits(v: Self::Bits) -> Self {
+ Self::from_bits(v)
+ }
+}
+
/// Converts an `Fp` to the closest machine float type.
/// Does not handle subnormal results.
///
/// Panics when the normalized exponent falls outside the normal range for `T`;
/// note that `e == T::MIN_EXP` is also rejected as "too small".
pub fn fp_to_float<T: RawFloat>(x: Fp) -> T {
    let x = x.normalize();
    // x.f is 64 bit, so x.e has a mantissa shift of 63
    let e = x.e + 63;
    if e > T::MAX_EXP {
        panic!("fp_to_float: exponent {} too large", e)
    } else if e > T::MIN_EXP {
        encode_normal(round_normal::<T>(x))
    } else {
        panic!("fp_to_float: exponent {} too small", e)
    }
}
+
/// Round the 64-bit significand to T::SIG_BITS bits with half-to-even.
/// Does not handle exponent overflow.
pub fn round_normal<T: RawFloat>(x: Fp) -> Unpacked {
    // Split the significand into the SIG_BITS quotient we keep and the discarded remainder.
    let excess = 64 - T::SIG_BITS as i16;
    let half: u64 = 1 << (excess - 1);
    let (q, rem) = (x.f >> excess, x.f & ((1 << excess) - 1));
    assert_eq!(q << excess | rem, x.f);
    // Adjust mantissa shift
    let k = x.e + excess;
    if rem < half {
        // Discarded part is below 0.5 ULP: truncate (round down).
        Unpacked::new(q, k)
    } else if rem == half && (q % 2) == 0 {
        // Exactly 0.5 ULP with an even quotient: ties-to-even keeps q.
        Unpacked::new(q, k)
    } else if q == T::MAX_SIG {
        // Rounding up would overflow the significand: carry into the exponent instead.
        Unpacked::new(T::MIN_SIG, k + 1)
    } else {
        // Round up.
        Unpacked::new(q + 1, k)
    }
}
+
/// Inverse of `RawFloat::unpack()` for normalized numbers.
/// Panics if the significand or exponent are not valid for normalized numbers.
pub fn encode_normal<T: RawFloat>(x: Unpacked) -> T {
    debug_assert!(
        T::MIN_SIG <= x.sig && x.sig <= T::MAX_SIG,
        "encode_normal: significand not normalized"
    );
    // Remove the hidden bit
    let sig_enc = x.sig & !(1 << T::EXPLICIT_SIG_BITS);
    // Adjust the exponent for exponent bias and mantissa shift
    let k_enc = x.k + T::MAX_EXP + T::EXPLICIT_SIG_BITS as i16;
    debug_assert!(k_enc != 0 && k_enc < T::MAX_ENCODED_EXP, "encode_normal: exponent out of range");
    // Leave sign bit at 0 ("+"), our numbers are all positive
    let bits = (k_enc as u64) << T::EXPLICIT_SIG_BITS | sig_enc;
    // The conversion to `T::Bits` cannot fail for an in-range encoding, hence unreachable!().
    T::from_bits(bits.try_into().unwrap_or_else(|_| unreachable!()))
}
+
+/// Construct a subnormal. A mantissa of 0 is allowed and constructs zero.
+pub fn encode_subnormal<T: RawFloat>(significand: u64) -> T {
+ assert!(significand < T::MIN_SIG, "encode_subnormal: not actually subnormal");
+ // Encoded exponent is 0, the sign bit is 0, so we just have to reinterpret the bits.
+ T::from_bits(significand.try_into().unwrap_or_else(|_| unreachable!()))
+}
+
/// Approximate a bignum with an Fp. Rounds within 0.5 ULP with half-to-even.
pub fn big_to_fp(f: &Big) -> Fp {
    let end = f.bit_length();
    assert!(end != 0, "big_to_fp: unexpectedly, input is zero");
    // Keep (up to) the 64 most significant bits as the Fp significand.
    let start = end.saturating_sub(64);
    let leading = num::get_bits(f, start, end);
    // We cut off all bits prior to the index `start`, i.e., we effectively right-shift by
    // an amount of `start`, so this is also the exponent we need.
    let e = start as i16;
    let rounded_down = Fp { f: leading, e }.normalize();
    // Round (half-to-even) depending on the truncated bits.
    match num::compare_with_half_ulp(f, start) {
        Less => rounded_down,
        Equal if leading % 2 == 0 => rounded_down,
        Equal | Greater => match leading.checked_add(1) {
            Some(f) => Fp { f, e }.normalize(),
            // Rounding up overflowed the 64-bit significand; shift one bit into the exponent.
            None => Fp { f: 1 << 63, e: e + 1 },
        },
    }
}
+
+/// Finds the largest floating point number strictly smaller than the argument.
+/// Does not handle subnormals, zero, or exponent underflow.
+pub fn prev_float<T: RawFloat>(x: T) -> T {
+ match x.classify() {
+ Infinite => panic!("prev_float: argument is infinite"),
+ Nan => panic!("prev_float: argument is NaN"),
+ Subnormal => panic!("prev_float: argument is subnormal"),
+ Zero => panic!("prev_float: argument is zero"),
+ Normal => {
+ let Unpacked { sig, k } = x.unpack();
+ if sig == T::MIN_SIG {
+ encode_normal(Unpacked::new(T::MAX_SIG, k - 1))
+ } else {
+ encode_normal(Unpacked::new(sig - 1, k))
+ }
+ }
+ }
+}
+
/// Finds the smallest floating point number strictly larger than the argument.
/// This operation is saturating, i.e., `next_float(inf) == inf`.
/// Unlike most code in this module, this function does handle zero, subnormals, and infinities.
/// However, like all other code here, it does not deal with NaN and negative numbers.
pub fn next_float<T: RawFloat>(x: T) -> T {
    match x.classify() {
        Nan => panic!("next_float: argument is NaN"),
        Infinite => T::INFINITY,
        // This seems too good to be true, but it works.
        // 0.0 is encoded as the all-zero word. Subnormals are 0x000m...m where m is the mantissa.
        // In particular, the smallest subnormal is 0x0...01 and the largest is 0x000F...F.
        // The smallest normal number is 0x0010...0, so this corner case works as well.
        // If the increment overflows the mantissa, the carry bit increments the exponent as we
        // want, and the mantissa bits become zero. Because of the hidden bit convention, this
        // too is exactly what we want!
        // Finally, f64::MAX + 1 = 7eff...f + 1 = 7ff0...0 = f64::INFINITY.
        Zero | Subnormal | Normal => T::from_bits(x.to_bits() + T::Bits::from(1u8)),
    }
}
--- /dev/null
+//! Tables of approximations of powers of ten.
+//! DO NOT MODIFY: Generated by `src/etc/dec2flt_table.py`
+
/// Smallest decimal exponent covered by the `POWERS` table below.
pub const MIN_E: i16 = -305;
/// Largest decimal exponent covered by the `POWERS` table below
/// (611 entries = MAX_E - MIN_E + 1).
pub const MAX_E: i16 = 305;
+
+#[rustfmt::skip]
+pub const POWERS: ([u64; 611], [i16; 611]) = (
+ [
+ 0xe0b62e2929aba83c,
+ 0x8c71dcd9ba0b4926,
+ 0xaf8e5410288e1b6f,
+ 0xdb71e91432b1a24b,
+ 0x892731ac9faf056f,
+ 0xab70fe17c79ac6ca,
+ 0xd64d3d9db981787d,
+ 0x85f0468293f0eb4e,
+ 0xa76c582338ed2622,
+ 0xd1476e2c07286faa,
+ 0x82cca4db847945ca,
+ 0xa37fce126597973d,
+ 0xcc5fc196fefd7d0c,
+ 0xff77b1fcbebcdc4f,
+ 0x9faacf3df73609b1,
+ 0xc795830d75038c1e,
+ 0xf97ae3d0d2446f25,
+ 0x9becce62836ac577,
+ 0xc2e801fb244576d5,
+ 0xf3a20279ed56d48a,
+ 0x9845418c345644d7,
+ 0xbe5691ef416bd60c,
+ 0xedec366b11c6cb8f,
+ 0x94b3a202eb1c3f39,
+ 0xb9e08a83a5e34f08,
+ 0xe858ad248f5c22ca,
+ 0x91376c36d99995be,
+ 0xb58547448ffffb2e,
+ 0xe2e69915b3fff9f9,
+ 0x8dd01fad907ffc3c,
+ 0xb1442798f49ffb4b,
+ 0xdd95317f31c7fa1d,
+ 0x8a7d3eef7f1cfc52,
+ 0xad1c8eab5ee43b67,
+ 0xd863b256369d4a41,
+ 0x873e4f75e2224e68,
+ 0xa90de3535aaae202,
+ 0xd3515c2831559a83,
+ 0x8412d9991ed58092,
+ 0xa5178fff668ae0b6,
+ 0xce5d73ff402d98e4,
+ 0x80fa687f881c7f8e,
+ 0xa139029f6a239f72,
+ 0xc987434744ac874f,
+ 0xfbe9141915d7a922,
+ 0x9d71ac8fada6c9b5,
+ 0xc4ce17b399107c23,
+ 0xf6019da07f549b2b,
+ 0x99c102844f94e0fb,
+ 0xc0314325637a193a,
+ 0xf03d93eebc589f88,
+ 0x96267c7535b763b5,
+ 0xbbb01b9283253ca3,
+ 0xea9c227723ee8bcb,
+ 0x92a1958a7675175f,
+ 0xb749faed14125d37,
+ 0xe51c79a85916f485,
+ 0x8f31cc0937ae58d3,
+ 0xb2fe3f0b8599ef08,
+ 0xdfbdcece67006ac9,
+ 0x8bd6a141006042be,
+ 0xaecc49914078536d,
+ 0xda7f5bf590966849,
+ 0x888f99797a5e012d,
+ 0xaab37fd7d8f58179,
+ 0xd5605fcdcf32e1d7,
+ 0x855c3be0a17fcd26,
+ 0xa6b34ad8c9dfc070,
+ 0xd0601d8efc57b08c,
+ 0x823c12795db6ce57,
+ 0xa2cb1717b52481ed,
+ 0xcb7ddcdda26da269,
+ 0xfe5d54150b090b03,
+ 0x9efa548d26e5a6e2,
+ 0xc6b8e9b0709f109a,
+ 0xf867241c8cc6d4c1,
+ 0x9b407691d7fc44f8,
+ 0xc21094364dfb5637,
+ 0xf294b943e17a2bc4,
+ 0x979cf3ca6cec5b5b,
+ 0xbd8430bd08277231,
+ 0xece53cec4a314ebe,
+ 0x940f4613ae5ed137,
+ 0xb913179899f68584,
+ 0xe757dd7ec07426e5,
+ 0x9096ea6f3848984f,
+ 0xb4bca50b065abe63,
+ 0xe1ebce4dc7f16dfc,
+ 0x8d3360f09cf6e4bd,
+ 0xb080392cc4349ded,
+ 0xdca04777f541c568,
+ 0x89e42caaf9491b61,
+ 0xac5d37d5b79b6239,
+ 0xd77485cb25823ac7,
+ 0x86a8d39ef77164bd,
+ 0xa8530886b54dbdec,
+ 0xd267caa862a12d67,
+ 0x8380dea93da4bc60,
+ 0xa46116538d0deb78,
+ 0xcd795be870516656,
+ 0x806bd9714632dff6,
+ 0xa086cfcd97bf97f4,
+ 0xc8a883c0fdaf7df0,
+ 0xfad2a4b13d1b5d6c,
+ 0x9cc3a6eec6311a64,
+ 0xc3f490aa77bd60fd,
+ 0xf4f1b4d515acb93c,
+ 0x991711052d8bf3c5,
+ 0xbf5cd54678eef0b7,
+ 0xef340a98172aace5,
+ 0x9580869f0e7aac0f,
+ 0xbae0a846d2195713,
+ 0xe998d258869facd7,
+ 0x91ff83775423cc06,
+ 0xb67f6455292cbf08,
+ 0xe41f3d6a7377eeca,
+ 0x8e938662882af53e,
+ 0xb23867fb2a35b28e,
+ 0xdec681f9f4c31f31,
+ 0x8b3c113c38f9f37f,
+ 0xae0b158b4738705f,
+ 0xd98ddaee19068c76,
+ 0x87f8a8d4cfa417ca,
+ 0xa9f6d30a038d1dbc,
+ 0xd47487cc8470652b,
+ 0x84c8d4dfd2c63f3b,
+ 0xa5fb0a17c777cf0a,
+ 0xcf79cc9db955c2cc,
+ 0x81ac1fe293d599c0,
+ 0xa21727db38cb0030,
+ 0xca9cf1d206fdc03c,
+ 0xfd442e4688bd304b,
+ 0x9e4a9cec15763e2f,
+ 0xc5dd44271ad3cdba,
+ 0xf7549530e188c129,
+ 0x9a94dd3e8cf578ba,
+ 0xc13a148e3032d6e8,
+ 0xf18899b1bc3f8ca2,
+ 0x96f5600f15a7b7e5,
+ 0xbcb2b812db11a5de,
+ 0xebdf661791d60f56,
+ 0x936b9fcebb25c996,
+ 0xb84687c269ef3bfb,
+ 0xe65829b3046b0afa,
+ 0x8ff71a0fe2c2e6dc,
+ 0xb3f4e093db73a093,
+ 0xe0f218b8d25088b8,
+ 0x8c974f7383725573,
+ 0xafbd2350644eead0,
+ 0xdbac6c247d62a584,
+ 0x894bc396ce5da772,
+ 0xab9eb47c81f5114f,
+ 0xd686619ba27255a3,
+ 0x8613fd0145877586,
+ 0xa798fc4196e952e7,
+ 0xd17f3b51fca3a7a1,
+ 0x82ef85133de648c5,
+ 0xa3ab66580d5fdaf6,
+ 0xcc963fee10b7d1b3,
+ 0xffbbcfe994e5c620,
+ 0x9fd561f1fd0f9bd4,
+ 0xc7caba6e7c5382c9,
+ 0xf9bd690a1b68637b,
+ 0x9c1661a651213e2d,
+ 0xc31bfa0fe5698db8,
+ 0xf3e2f893dec3f126,
+ 0x986ddb5c6b3a76b8,
+ 0xbe89523386091466,
+ 0xee2ba6c0678b597f,
+ 0x94db483840b717f0,
+ 0xba121a4650e4ddec,
+ 0xe896a0d7e51e1566,
+ 0x915e2486ef32cd60,
+ 0xb5b5ada8aaff80b8,
+ 0xe3231912d5bf60e6,
+ 0x8df5efabc5979c90,
+ 0xb1736b96b6fd83b4,
+ 0xddd0467c64bce4a1,
+ 0x8aa22c0dbef60ee4,
+ 0xad4ab7112eb3929e,
+ 0xd89d64d57a607745,
+ 0x87625f056c7c4a8b,
+ 0xa93af6c6c79b5d2e,
+ 0xd389b47879823479,
+ 0x843610cb4bf160cc,
+ 0xa54394fe1eedb8ff,
+ 0xce947a3da6a9273e,
+ 0x811ccc668829b887,
+ 0xa163ff802a3426a9,
+ 0xc9bcff6034c13053,
+ 0xfc2c3f3841f17c68,
+ 0x9d9ba7832936edc1,
+ 0xc5029163f384a931,
+ 0xf64335bcf065d37d,
+ 0x99ea0196163fa42e,
+ 0xc06481fb9bcf8d3a,
+ 0xf07da27a82c37088,
+ 0x964e858c91ba2655,
+ 0xbbe226efb628afeb,
+ 0xeadab0aba3b2dbe5,
+ 0x92c8ae6b464fc96f,
+ 0xb77ada0617e3bbcb,
+ 0xe55990879ddcaabe,
+ 0x8f57fa54c2a9eab7,
+ 0xb32df8e9f3546564,
+ 0xdff9772470297ebd,
+ 0x8bfbea76c619ef36,
+ 0xaefae51477a06b04,
+ 0xdab99e59958885c5,
+ 0x88b402f7fd75539b,
+ 0xaae103b5fcd2a882,
+ 0xd59944a37c0752a2,
+ 0x857fcae62d8493a5,
+ 0xa6dfbd9fb8e5b88f,
+ 0xd097ad07a71f26b2,
+ 0x825ecc24c8737830,
+ 0xa2f67f2dfa90563b,
+ 0xcbb41ef979346bca,
+ 0xfea126b7d78186bd,
+ 0x9f24b832e6b0f436,
+ 0xc6ede63fa05d3144,
+ 0xf8a95fcf88747d94,
+ 0x9b69dbe1b548ce7d,
+ 0xc24452da229b021c,
+ 0xf2d56790ab41c2a3,
+ 0x97c560ba6b0919a6,
+ 0xbdb6b8e905cb600f,
+ 0xed246723473e3813,
+ 0x9436c0760c86e30c,
+ 0xb94470938fa89bcf,
+ 0xe7958cb87392c2c3,
+ 0x90bd77f3483bb9ba,
+ 0xb4ecd5f01a4aa828,
+ 0xe2280b6c20dd5232,
+ 0x8d590723948a535f,
+ 0xb0af48ec79ace837,
+ 0xdcdb1b2798182245,
+ 0x8a08f0f8bf0f156b,
+ 0xac8b2d36eed2dac6,
+ 0xd7adf884aa879177,
+ 0x86ccbb52ea94baeb,
+ 0xa87fea27a539e9a5,
+ 0xd29fe4b18e88640f,
+ 0x83a3eeeef9153e89,
+ 0xa48ceaaab75a8e2b,
+ 0xcdb02555653131b6,
+ 0x808e17555f3ebf12,
+ 0xa0b19d2ab70e6ed6,
+ 0xc8de047564d20a8c,
+ 0xfb158592be068d2f,
+ 0x9ced737bb6c4183d,
+ 0xc428d05aa4751e4d,
+ 0xf53304714d9265e0,
+ 0x993fe2c6d07b7fac,
+ 0xbf8fdb78849a5f97,
+ 0xef73d256a5c0f77d,
+ 0x95a8637627989aae,
+ 0xbb127c53b17ec159,
+ 0xe9d71b689dde71b0,
+ 0x9226712162ab070e,
+ 0xb6b00d69bb55c8d1,
+ 0xe45c10c42a2b3b06,
+ 0x8eb98a7a9a5b04e3,
+ 0xb267ed1940f1c61c,
+ 0xdf01e85f912e37a3,
+ 0x8b61313bbabce2c6,
+ 0xae397d8aa96c1b78,
+ 0xd9c7dced53c72256,
+ 0x881cea14545c7575,
+ 0xaa242499697392d3,
+ 0xd4ad2dbfc3d07788,
+ 0x84ec3c97da624ab5,
+ 0xa6274bbdd0fadd62,
+ 0xcfb11ead453994ba,
+ 0x81ceb32c4b43fcf5,
+ 0xa2425ff75e14fc32,
+ 0xcad2f7f5359a3b3e,
+ 0xfd87b5f28300ca0e,
+ 0x9e74d1b791e07e48,
+ 0xc612062576589ddb,
+ 0xf79687aed3eec551,
+ 0x9abe14cd44753b53,
+ 0xc16d9a0095928a27,
+ 0xf1c90080baf72cb1,
+ 0x971da05074da7bef,
+ 0xbce5086492111aeb,
+ 0xec1e4a7db69561a5,
+ 0x9392ee8e921d5d07,
+ 0xb877aa3236a4b449,
+ 0xe69594bec44de15b,
+ 0x901d7cf73ab0acd9,
+ 0xb424dc35095cd80f,
+ 0xe12e13424bb40e13,
+ 0x8cbccc096f5088cc,
+ 0xafebff0bcb24aaff,
+ 0xdbe6fecebdedd5bf,
+ 0x89705f4136b4a597,
+ 0xabcc77118461cefd,
+ 0xd6bf94d5e57a42bc,
+ 0x8637bd05af6c69b6,
+ 0xa7c5ac471b478423,
+ 0xd1b71758e219652c,
+ 0x83126e978d4fdf3b,
+ 0xa3d70a3d70a3d70a,
+ 0xcccccccccccccccd,
+ 0x8000000000000000,
+ 0xa000000000000000,
+ 0xc800000000000000,
+ 0xfa00000000000000,
+ 0x9c40000000000000,
+ 0xc350000000000000,
+ 0xf424000000000000,
+ 0x9896800000000000,
+ 0xbebc200000000000,
+ 0xee6b280000000000,
+ 0x9502f90000000000,
+ 0xba43b74000000000,
+ 0xe8d4a51000000000,
+ 0x9184e72a00000000,
+ 0xb5e620f480000000,
+ 0xe35fa931a0000000,
+ 0x8e1bc9bf04000000,
+ 0xb1a2bc2ec5000000,
+ 0xde0b6b3a76400000,
+ 0x8ac7230489e80000,
+ 0xad78ebc5ac620000,
+ 0xd8d726b7177a8000,
+ 0x878678326eac9000,
+ 0xa968163f0a57b400,
+ 0xd3c21bcecceda100,
+ 0x84595161401484a0,
+ 0xa56fa5b99019a5c8,
+ 0xcecb8f27f4200f3a,
+ 0x813f3978f8940984,
+ 0xa18f07d736b90be5,
+ 0xc9f2c9cd04674edf,
+ 0xfc6f7c4045812296,
+ 0x9dc5ada82b70b59e,
+ 0xc5371912364ce305,
+ 0xf684df56c3e01bc7,
+ 0x9a130b963a6c115c,
+ 0xc097ce7bc90715b3,
+ 0xf0bdc21abb48db20,
+ 0x96769950b50d88f4,
+ 0xbc143fa4e250eb31,
+ 0xeb194f8e1ae525fd,
+ 0x92efd1b8d0cf37be,
+ 0xb7abc627050305ae,
+ 0xe596b7b0c643c719,
+ 0x8f7e32ce7bea5c70,
+ 0xb35dbf821ae4f38c,
+ 0xe0352f62a19e306f,
+ 0x8c213d9da502de45,
+ 0xaf298d050e4395d7,
+ 0xdaf3f04651d47b4c,
+ 0x88d8762bf324cd10,
+ 0xab0e93b6efee0054,
+ 0xd5d238a4abe98068,
+ 0x85a36366eb71f041,
+ 0xa70c3c40a64e6c52,
+ 0xd0cf4b50cfe20766,
+ 0x82818f1281ed44a0,
+ 0xa321f2d7226895c8,
+ 0xcbea6f8ceb02bb3a,
+ 0xfee50b7025c36a08,
+ 0x9f4f2726179a2245,
+ 0xc722f0ef9d80aad6,
+ 0xf8ebad2b84e0d58c,
+ 0x9b934c3b330c8577,
+ 0xc2781f49ffcfa6d5,
+ 0xf316271c7fc3908b,
+ 0x97edd871cfda3a57,
+ 0xbde94e8e43d0c8ec,
+ 0xed63a231d4c4fb27,
+ 0x945e455f24fb1cf9,
+ 0xb975d6b6ee39e437,
+ 0xe7d34c64a9c85d44,
+ 0x90e40fbeea1d3a4b,
+ 0xb51d13aea4a488dd,
+ 0xe264589a4dcdab15,
+ 0x8d7eb76070a08aed,
+ 0xb0de65388cc8ada8,
+ 0xdd15fe86affad912,
+ 0x8a2dbf142dfcc7ab,
+ 0xacb92ed9397bf996,
+ 0xd7e77a8f87daf7fc,
+ 0x86f0ac99b4e8dafd,
+ 0xa8acd7c0222311bd,
+ 0xd2d80db02aabd62c,
+ 0x83c7088e1aab65db,
+ 0xa4b8cab1a1563f52,
+ 0xcde6fd5e09abcf27,
+ 0x80b05e5ac60b6178,
+ 0xa0dc75f1778e39d6,
+ 0xc913936dd571c84c,
+ 0xfb5878494ace3a5f,
+ 0x9d174b2dcec0e47b,
+ 0xc45d1df942711d9a,
+ 0xf5746577930d6501,
+ 0x9968bf6abbe85f20,
+ 0xbfc2ef456ae276e9,
+ 0xefb3ab16c59b14a3,
+ 0x95d04aee3b80ece6,
+ 0xbb445da9ca61281f,
+ 0xea1575143cf97227,
+ 0x924d692ca61be758,
+ 0xb6e0c377cfa2e12e,
+ 0xe498f455c38b997a,
+ 0x8edf98b59a373fec,
+ 0xb2977ee300c50fe7,
+ 0xdf3d5e9bc0f653e1,
+ 0x8b865b215899f46d,
+ 0xae67f1e9aec07188,
+ 0xda01ee641a708dea,
+ 0x884134fe908658b2,
+ 0xaa51823e34a7eedf,
+ 0xd4e5e2cdc1d1ea96,
+ 0x850fadc09923329e,
+ 0xa6539930bf6bff46,
+ 0xcfe87f7cef46ff17,
+ 0x81f14fae158c5f6e,
+ 0xa26da3999aef774a,
+ 0xcb090c8001ab551c,
+ 0xfdcb4fa002162a63,
+ 0x9e9f11c4014dda7e,
+ 0xc646d63501a1511e,
+ 0xf7d88bc24209a565,
+ 0x9ae757596946075f,
+ 0xc1a12d2fc3978937,
+ 0xf209787bb47d6b85,
+ 0x9745eb4d50ce6333,
+ 0xbd176620a501fc00,
+ 0xec5d3fa8ce427b00,
+ 0x93ba47c980e98ce0,
+ 0xb8a8d9bbe123f018,
+ 0xe6d3102ad96cec1e,
+ 0x9043ea1ac7e41393,
+ 0xb454e4a179dd1877,
+ 0xe16a1dc9d8545e95,
+ 0x8ce2529e2734bb1d,
+ 0xb01ae745b101e9e4,
+ 0xdc21a1171d42645d,
+ 0x899504ae72497eba,
+ 0xabfa45da0edbde69,
+ 0xd6f8d7509292d603,
+ 0x865b86925b9bc5c2,
+ 0xa7f26836f282b733,
+ 0xd1ef0244af2364ff,
+ 0x8335616aed761f1f,
+ 0xa402b9c5a8d3a6e7,
+ 0xcd036837130890a1,
+ 0x802221226be55a65,
+ 0xa02aa96b06deb0fe,
+ 0xc83553c5c8965d3d,
+ 0xfa42a8b73abbf48d,
+ 0x9c69a97284b578d8,
+ 0xc38413cf25e2d70e,
+ 0xf46518c2ef5b8cd1,
+ 0x98bf2f79d5993803,
+ 0xbeeefb584aff8604,
+ 0xeeaaba2e5dbf6785,
+ 0x952ab45cfa97a0b3,
+ 0xba756174393d88e0,
+ 0xe912b9d1478ceb17,
+ 0x91abb422ccb812ef,
+ 0xb616a12b7fe617aa,
+ 0xe39c49765fdf9d95,
+ 0x8e41ade9fbebc27d,
+ 0xb1d219647ae6b31c,
+ 0xde469fbd99a05fe3,
+ 0x8aec23d680043bee,
+ 0xada72ccc20054aea,
+ 0xd910f7ff28069da4,
+ 0x87aa9aff79042287,
+ 0xa99541bf57452b28,
+ 0xd3fa922f2d1675f2,
+ 0x847c9b5d7c2e09b7,
+ 0xa59bc234db398c25,
+ 0xcf02b2c21207ef2f,
+ 0x8161afb94b44f57d,
+ 0xa1ba1ba79e1632dc,
+ 0xca28a291859bbf93,
+ 0xfcb2cb35e702af78,
+ 0x9defbf01b061adab,
+ 0xc56baec21c7a1916,
+ 0xf6c69a72a3989f5c,
+ 0x9a3c2087a63f6399,
+ 0xc0cb28a98fcf3c80,
+ 0xf0fdf2d3f3c30b9f,
+ 0x969eb7c47859e744,
+ 0xbc4665b596706115,
+ 0xeb57ff22fc0c795a,
+ 0x9316ff75dd87cbd8,
+ 0xb7dcbf5354e9bece,
+ 0xe5d3ef282a242e82,
+ 0x8fa475791a569d11,
+ 0xb38d92d760ec4455,
+ 0xe070f78d3927556b,
+ 0x8c469ab843b89563,
+ 0xaf58416654a6babb,
+ 0xdb2e51bfe9d0696a,
+ 0x88fcf317f22241e2,
+ 0xab3c2fddeeaad25b,
+ 0xd60b3bd56a5586f2,
+ 0x85c7056562757457,
+ 0xa738c6bebb12d16d,
+ 0xd106f86e69d785c8,
+ 0x82a45b450226b39d,
+ 0xa34d721642b06084,
+ 0xcc20ce9bd35c78a5,
+ 0xff290242c83396ce,
+ 0x9f79a169bd203e41,
+ 0xc75809c42c684dd1,
+ 0xf92e0c3537826146,
+ 0x9bbcc7a142b17ccc,
+ 0xc2abf989935ddbfe,
+ 0xf356f7ebf83552fe,
+ 0x98165af37b2153df,
+ 0xbe1bf1b059e9a8d6,
+ 0xeda2ee1c7064130c,
+ 0x9485d4d1c63e8be8,
+ 0xb9a74a0637ce2ee1,
+ 0xe8111c87c5c1ba9a,
+ 0x910ab1d4db9914a0,
+ 0xb54d5e4a127f59c8,
+ 0xe2a0b5dc971f303a,
+ 0x8da471a9de737e24,
+ 0xb10d8e1456105dad,
+ 0xdd50f1996b947519,
+ 0x8a5296ffe33cc930,
+ 0xace73cbfdc0bfb7b,
+ 0xd8210befd30efa5a,
+ 0x8714a775e3e95c78,
+ 0xa8d9d1535ce3b396,
+ 0xd31045a8341ca07c,
+ 0x83ea2b892091e44e,
+ 0xa4e4b66b68b65d61,
+ 0xce1de40642e3f4b9,
+ 0x80d2ae83e9ce78f4,
+ 0xa1075a24e4421731,
+ 0xc94930ae1d529cfd,
+ 0xfb9b7cd9a4a7443c,
+ 0x9d412e0806e88aa6,
+ 0xc491798a08a2ad4f,
+ 0xf5b5d7ec8acb58a3,
+ 0x9991a6f3d6bf1766,
+ 0xbff610b0cc6edd3f,
+ 0xeff394dcff8a948f,
+ 0x95f83d0a1fb69cd9,
+ 0xbb764c4ca7a44410,
+ 0xea53df5fd18d5514,
+ 0x92746b9be2f8552c,
+ 0xb7118682dbb66a77,
+ 0xe4d5e82392a40515,
+ 0x8f05b1163ba6832d,
+ 0xb2c71d5bca9023f8,
+ 0xdf78e4b2bd342cf7,
+ 0x8bab8eefb6409c1a,
+ 0xae9672aba3d0c321,
+ 0xda3c0f568cc4f3e9,
+ 0x8865899617fb1871,
+ 0xaa7eebfb9df9de8e,
+ 0xd51ea6fa85785631,
+ 0x8533285c936b35df,
+ 0xa67ff273b8460357,
+ 0xd01fef10a657842c,
+ 0x8213f56a67f6b29c,
+ 0xa298f2c501f45f43,
+ 0xcb3f2f7642717713,
+ 0xfe0efb53d30dd4d8,
+ 0x9ec95d1463e8a507,
+ 0xc67bb4597ce2ce49,
+ 0xf81aa16fdc1b81db,
+ 0x9b10a4e5e9913129,
+ 0xc1d4ce1f63f57d73,
+ 0xf24a01a73cf2dcd0,
+ 0x976e41088617ca02,
+ 0xbd49d14aa79dbc82,
+ 0xec9c459d51852ba3,
+ 0x93e1ab8252f33b46,
+ 0xb8da1662e7b00a17,
+ 0xe7109bfba19c0c9d,
+ 0x906a617d450187e2,
+ 0xb484f9dc9641e9db,
+ 0xe1a63853bbd26451,
+ 0x8d07e33455637eb3,
+ 0xb049dc016abc5e60,
+ 0xdc5c5301c56b75f7,
+ 0x89b9b3e11b6329bb,
+ 0xac2820d9623bf429,
+ 0xd732290fbacaf134,
+ 0x867f59a9d4bed6c0,
+ 0xa81f301449ee8c70,
+ 0xd226fc195c6a2f8c,
+ 0x83585d8fd9c25db8,
+ 0xa42e74f3d032f526,
+ 0xcd3a1230c43fb26f,
+ 0x80444b5e7aa7cf85,
+ 0xa0555e361951c367,
+ 0xc86ab5c39fa63441,
+ 0xfa856334878fc151,
+ 0x9c935e00d4b9d8d2,
+ 0xc3b8358109e84f07,
+ 0xf4a642e14c6262c9,
+ 0x98e7e9cccfbd7dbe,
+ 0xbf21e44003acdd2d,
+ 0xeeea5d5004981478,
+ 0x95527a5202df0ccb,
+ 0xbaa718e68396cffe,
+ 0xe950df20247c83fd,
+ 0x91d28b7416cdd27e,
+ ],
+ [
+ -1077,
+ -1073,
+ -1070,
+ -1067,
+ -1063,
+ -1060,
+ -1057,
+ -1053,
+ -1050,
+ -1047,
+ -1043,
+ -1040,
+ -1037,
+ -1034,
+ -1030,
+ -1027,
+ -1024,
+ -1020,
+ -1017,
+ -1014,
+ -1010,
+ -1007,
+ -1004,
+ -1000,
+ -997,
+ -994,
+ -990,
+ -987,
+ -984,
+ -980,
+ -977,
+ -974,
+ -970,
+ -967,
+ -964,
+ -960,
+ -957,
+ -954,
+ -950,
+ -947,
+ -944,
+ -940,
+ -937,
+ -934,
+ -931,
+ -927,
+ -924,
+ -921,
+ -917,
+ -914,
+ -911,
+ -907,
+ -904,
+ -901,
+ -897,
+ -894,
+ -891,
+ -887,
+ -884,
+ -881,
+ -877,
+ -874,
+ -871,
+ -867,
+ -864,
+ -861,
+ -857,
+ -854,
+ -851,
+ -847,
+ -844,
+ -841,
+ -838,
+ -834,
+ -831,
+ -828,
+ -824,
+ -821,
+ -818,
+ -814,
+ -811,
+ -808,
+ -804,
+ -801,
+ -798,
+ -794,
+ -791,
+ -788,
+ -784,
+ -781,
+ -778,
+ -774,
+ -771,
+ -768,
+ -764,
+ -761,
+ -758,
+ -754,
+ -751,
+ -748,
+ -744,
+ -741,
+ -738,
+ -735,
+ -731,
+ -728,
+ -725,
+ -721,
+ -718,
+ -715,
+ -711,
+ -708,
+ -705,
+ -701,
+ -698,
+ -695,
+ -691,
+ -688,
+ -685,
+ -681,
+ -678,
+ -675,
+ -671,
+ -668,
+ -665,
+ -661,
+ -658,
+ -655,
+ -651,
+ -648,
+ -645,
+ -642,
+ -638,
+ -635,
+ -632,
+ -628,
+ -625,
+ -622,
+ -618,
+ -615,
+ -612,
+ -608,
+ -605,
+ -602,
+ -598,
+ -595,
+ -592,
+ -588,
+ -585,
+ -582,
+ -578,
+ -575,
+ -572,
+ -568,
+ -565,
+ -562,
+ -558,
+ -555,
+ -552,
+ -549,
+ -545,
+ -542,
+ -539,
+ -535,
+ -532,
+ -529,
+ -525,
+ -522,
+ -519,
+ -515,
+ -512,
+ -509,
+ -505,
+ -502,
+ -499,
+ -495,
+ -492,
+ -489,
+ -485,
+ -482,
+ -479,
+ -475,
+ -472,
+ -469,
+ -465,
+ -462,
+ -459,
+ -455,
+ -452,
+ -449,
+ -446,
+ -442,
+ -439,
+ -436,
+ -432,
+ -429,
+ -426,
+ -422,
+ -419,
+ -416,
+ -412,
+ -409,
+ -406,
+ -402,
+ -399,
+ -396,
+ -392,
+ -389,
+ -386,
+ -382,
+ -379,
+ -376,
+ -372,
+ -369,
+ -366,
+ -362,
+ -359,
+ -356,
+ -353,
+ -349,
+ -346,
+ -343,
+ -339,
+ -336,
+ -333,
+ -329,
+ -326,
+ -323,
+ -319,
+ -316,
+ -313,
+ -309,
+ -306,
+ -303,
+ -299,
+ -296,
+ -293,
+ -289,
+ -286,
+ -283,
+ -279,
+ -276,
+ -273,
+ -269,
+ -266,
+ -263,
+ -259,
+ -256,
+ -253,
+ -250,
+ -246,
+ -243,
+ -240,
+ -236,
+ -233,
+ -230,
+ -226,
+ -223,
+ -220,
+ -216,
+ -213,
+ -210,
+ -206,
+ -203,
+ -200,
+ -196,
+ -193,
+ -190,
+ -186,
+ -183,
+ -180,
+ -176,
+ -173,
+ -170,
+ -166,
+ -163,
+ -160,
+ -157,
+ -153,
+ -150,
+ -147,
+ -143,
+ -140,
+ -137,
+ -133,
+ -130,
+ -127,
+ -123,
+ -120,
+ -117,
+ -113,
+ -110,
+ -107,
+ -103,
+ -100,
+ -97,
+ -93,
+ -90,
+ -87,
+ -83,
+ -80,
+ -77,
+ -73,
+ -70,
+ -67,
+ -63,
+ -60,
+ -57,
+ -54,
+ -50,
+ -47,
+ -44,
+ -40,
+ -37,
+ -34,
+ -30,
+ -27,
+ -24,
+ -20,
+ -17,
+ -14,
+ -10,
+ -7,
+ -4,
+ 0,
+ 3,
+ 6,
+ 10,
+ 13,
+ 16,
+ 20,
+ 23,
+ 26,
+ 30,
+ 33,
+ 36,
+ 39,
+ 43,
+ 46,
+ 49,
+ 53,
+ 56,
+ 59,
+ 63,
+ 66,
+ 69,
+ 73,
+ 76,
+ 79,
+ 83,
+ 86,
+ 89,
+ 93,
+ 96,
+ 99,
+ 103,
+ 106,
+ 109,
+ 113,
+ 116,
+ 119,
+ 123,
+ 126,
+ 129,
+ 132,
+ 136,
+ 139,
+ 142,
+ 146,
+ 149,
+ 152,
+ 156,
+ 159,
+ 162,
+ 166,
+ 169,
+ 172,
+ 176,
+ 179,
+ 182,
+ 186,
+ 189,
+ 192,
+ 196,
+ 199,
+ 202,
+ 206,
+ 209,
+ 212,
+ 216,
+ 219,
+ 222,
+ 226,
+ 229,
+ 232,
+ 235,
+ 239,
+ 242,
+ 245,
+ 249,
+ 252,
+ 255,
+ 259,
+ 262,
+ 265,
+ 269,
+ 272,
+ 275,
+ 279,
+ 282,
+ 285,
+ 289,
+ 292,
+ 295,
+ 299,
+ 302,
+ 305,
+ 309,
+ 312,
+ 315,
+ 319,
+ 322,
+ 325,
+ 328,
+ 332,
+ 335,
+ 338,
+ 342,
+ 345,
+ 348,
+ 352,
+ 355,
+ 358,
+ 362,
+ 365,
+ 368,
+ 372,
+ 375,
+ 378,
+ 382,
+ 385,
+ 388,
+ 392,
+ 395,
+ 398,
+ 402,
+ 405,
+ 408,
+ 412,
+ 415,
+ 418,
+ 422,
+ 425,
+ 428,
+ 431,
+ 435,
+ 438,
+ 441,
+ 445,
+ 448,
+ 451,
+ 455,
+ 458,
+ 461,
+ 465,
+ 468,
+ 471,
+ 475,
+ 478,
+ 481,
+ 485,
+ 488,
+ 491,
+ 495,
+ 498,
+ 501,
+ 505,
+ 508,
+ 511,
+ 515,
+ 518,
+ 521,
+ 524,
+ 528,
+ 531,
+ 534,
+ 538,
+ 541,
+ 544,
+ 548,
+ 551,
+ 554,
+ 558,
+ 561,
+ 564,
+ 568,
+ 571,
+ 574,
+ 578,
+ 581,
+ 584,
+ 588,
+ 591,
+ 594,
+ 598,
+ 601,
+ 604,
+ 608,
+ 611,
+ 614,
+ 617,
+ 621,
+ 624,
+ 627,
+ 631,
+ 634,
+ 637,
+ 641,
+ 644,
+ 647,
+ 651,
+ 654,
+ 657,
+ 661,
+ 664,
+ 667,
+ 671,
+ 674,
+ 677,
+ 681,
+ 684,
+ 687,
+ 691,
+ 694,
+ 697,
+ 701,
+ 704,
+ 707,
+ 711,
+ 714,
+ 717,
+ 720,
+ 724,
+ 727,
+ 730,
+ 734,
+ 737,
+ 740,
+ 744,
+ 747,
+ 750,
+ 754,
+ 757,
+ 760,
+ 764,
+ 767,
+ 770,
+ 774,
+ 777,
+ 780,
+ 784,
+ 787,
+ 790,
+ 794,
+ 797,
+ 800,
+ 804,
+ 807,
+ 810,
+ 813,
+ 817,
+ 820,
+ 823,
+ 827,
+ 830,
+ 833,
+ 837,
+ 840,
+ 843,
+ 847,
+ 850,
+ 853,
+ 857,
+ 860,
+ 863,
+ 867,
+ 870,
+ 873,
+ 877,
+ 880,
+ 883,
+ 887,
+ 890,
+ 893,
+ 897,
+ 900,
+ 903,
+ 907,
+ 910,
+ 913,
+ 916,
+ 920,
+ 923,
+ 926,
+ 930,
+ 933,
+ 936,
+ 940,
+ 943,
+ 946,
+ 950,
+ ],
+);
+
/// Small powers of ten, 10^0 through 10^10, each of which is exactly
/// representable in an `f32` (10^10 = 2^10 * 5^10 and 5^10 < 2^24, the
/// mantissa width), so indexing this table incurs no rounding error.
#[rustfmt::skip]
pub const F32_SHORT_POWERS: [f32; 11] = [
    1e0, 1e1, 1e2, 1e3, 1e4, 1e5,
    1e6, 1e7, 1e8, 1e9, 1e10,
];
+
/// Small powers of ten, 10^0 through 10^22, each of which is exactly
/// representable in an `f64` (10^22 = 2^22 * 5^22 and 5^22 < 2^53, the
/// mantissa width), so indexing this table incurs no rounding error.
#[rustfmt::skip]
pub const F64_SHORT_POWERS: [f64; 23] = [
    1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7,
    1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
    1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22,
];
--- /dev/null
+//! Extended precision "soft float", for internal use only.
+
+// This module is only for dec2flt and flt2dec, and only public because of coretests.
+// It is not intended to ever be stabilized.
+#![doc(hidden)]
+#![unstable(
+ feature = "core_private_diy_float",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
/// A custom 64-bit floating point type, representing `f * 2^e`.
///
/// Unlike a hardware float, the mantissa is a plain `u64` and no
/// normalization invariant is enforced by construction; callers use
/// [`Fp::normalize`] or [`Fp::normalize_to`] to establish one.
#[derive(Copy, Clone, Debug)]
#[doc(hidden)]
pub struct Fp {
    /// The integer mantissa.
    pub f: u64,
    /// The exponent in base 2.
    pub e: i16,
}

impl Fp {
    /// Returns a correctly rounded product of itself and `other`.
    ///
    /// The full 128-bit product is assembled from four 32x32 -> 64-bit
    /// partial products; only the high 64 bits are kept, rounded to
    /// nearest via the `1 << 31` half-ulp added to the discarded half.
    pub fn mul(&self, other: &Fp) -> Fp {
        const MASK: u64 = 0xffffffff;
        let a = self.f >> 32;
        let b = self.f & MASK;
        let c = other.f >> 32;
        let d = other.f & MASK;
        let ac = a * c;
        let bc = b * c;
        let ad = a * d;
        let bd = b * d;
        // Carries out of the low 64 bits of the product, plus the
        // rounding term.
        let tmp = (bd >> 32) + (ad & MASK) + (bc & MASK) + (1 << 31) /* round */;
        let f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
        // +64 accounts for the 64 low product bits that were dropped.
        let e = self.e + other.e + 64;
        Fp { f, e }
    }

    /// Normalizes itself so that the resulting mantissa is at least `2^63`.
    pub fn normalize(&self) -> Fp {
        let mut f = self.f;
        let mut e = self.e;
        // Binary-search-style shifts: probe the top 32, 16, 8, 4, 2 and 1
        // bits in turn, so at most 6 branches are taken.
        if f >> (64 - 32) == 0 {
            f <<= 32;
            e -= 32;
        }
        if f >> (64 - 16) == 0 {
            f <<= 16;
            e -= 16;
        }
        if f >> (64 - 8) == 0 {
            f <<= 8;
            e -= 8;
        }
        if f >> (64 - 4) == 0 {
            f <<= 4;
            e -= 4;
        }
        if f >> (64 - 2) == 0 {
            f <<= 2;
            e -= 2;
        }
        if f >> (64 - 1) == 0 {
            f <<= 1;
            e -= 1;
        }
        // FIX: this previously read `1 >> 63`, which is 0, making the
        // assertion vacuously true. The intended invariant is that the
        // mantissa is now normalized, i.e. at least 2^63 (top bit set).
        debug_assert!(f >= (1 << 63));
        Fp { f, e }
    }

    /// Normalizes itself to have the shared exponent.
    /// It can only decrease the exponent (and thus increase the mantissa).
    pub fn normalize_to(&self, e: i16) -> Fp {
        let edelta = self.e - e;
        assert!(edelta >= 0);
        let edelta = edelta as usize;
        // Shifting back must reproduce `f`, i.e. no mantissa bits may be
        // shifted out of the top.
        assert_eq!(self.f << edelta >> edelta, self.f);
        Fp { f: self.f << edelta, e }
    }
}
--- /dev/null
+//! Error types for conversion to integral types.
+
+use crate::convert::Infallible;
+use crate::fmt;
+
/// The error type returned when a checked integral type conversion fails.
#[stable(feature = "try_from", since = "1.34.0")]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
// The private `()` field keeps construction crate-internal while the error
// itself stays zero-sized: a failed checked conversion carries no payload.
pub struct TryFromIntError(pub(crate) ());

impl TryFromIntError {
    #[unstable(
        feature = "int_error_internals",
        reason = "available through Error trait and this method should \
                  not be exposed publicly",
        issue = "none"
    )]
    #[doc(hidden)]
    // Single static message: an out-of-range value is the only way a
    // checked integral conversion can fail.
    pub fn __description(&self) -> &str {
        "out of range integral type conversion attempted"
    }
}
+
#[stable(feature = "try_from", since = "1.34.0")]
impl fmt::Display for TryFromIntError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Forward to the canonical internal message; using `str`'s own
        // `Display` keeps formatter flags (width, fill, ...) honored.
        self.__description().fmt(fmt)
    }
}
+
#[stable(feature = "try_from", since = "1.34.0")]
impl From<Infallible> for TryFromIntError {
    fn from(x: Infallible) -> TryFromIntError {
        // `Infallible` has no variants, so this empty match is vacuously
        // exhaustive: the conversion can never actually be invoked at
        // runtime, it only exists to satisfy trait bounds.
        match x {}
    }
}
+
#[unstable(feature = "never_type", issue = "35121")]
impl From<!> for TryFromIntError {
    fn from(never: !) -> TryFromIntError {
        // Match rather than coerce to make sure that code like
        // `From<Infallible> for TryFromIntError` above will keep working
        // when `Infallible` becomes an alias to `!`.
        // (An empty match on `!` is always exhaustive, so no arms are needed.)
        match never {}
    }
}
+
/// An error which can be returned when parsing an integer.
///
/// This error is used as the error type for the `from_str_radix()` functions
/// on the primitive integer types, such as [`i8::from_str_radix`].
///
/// # Potential causes
///
/// Among other causes, `ParseIntError` can occur because of leading or trailing whitespace
/// in the string e.g., when it is obtained from the standard input.
/// Using the [`str.trim()`] method ensures that no whitespace remains before parsing.
///
/// [`str.trim()`]: ../../std/primitive.str.html#method.trim
/// [`i8::from_str_radix`]: ../../std/primitive.i8.html#method.from_str_radix
///
/// # Example
///
/// ```
/// if let Err(e) = i32::from_str_radix("a12", 10) {
///     println!("Failed conversion to i32: {}", e);
/// }
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ParseIntError {
    // Kept private so the set of error kinds can grow without breaking
    // callers; exposed read-only through `ParseIntError::kind()`.
    pub(super) kind: IntErrorKind,
}
+
/// Enum to store the various types of errors that can cause parsing an integer to fail.
///
/// # Example
///
/// ```
/// #![feature(int_error_matching)]
///
/// # fn main() {
/// if let Err(e) = i32::from_str_radix("a12", 10) {
///     println!("Failed conversion to i32: {:?}", e.kind());
/// }
/// # }
/// ```
#[unstable(
    feature = "int_error_matching",
    reason = "it can be useful to match errors when making error messages \
              for integer parsing",
    issue = "22639"
)]
#[derive(Debug, Clone, PartialEq, Eq)]
// `non_exhaustive` lets new failure kinds be added later without a
// breaking change, even though the enum is publicly matchable.
#[non_exhaustive]
pub enum IntErrorKind {
    /// Value being parsed is empty.
    ///
    /// Among other causes, this variant will be constructed when parsing an empty string.
    Empty,
    /// Contains an invalid digit in its context.
    ///
    /// Among other causes, this variant will be constructed when parsing a string that
    /// contains a non-ASCII char.
    ///
    /// This variant is also constructed when a `+` or `-` is misplaced within a string
    /// either on its own or in the middle of a number.
    InvalidDigit,
    /// Integer is too large to store in target integer type.
    PosOverflow,
    /// Integer is too small to store in target integer type.
    NegOverflow,
    /// Value was zero.
    ///
    /// This variant will be emitted when the string being parsed has a value of zero,
    /// which would be illegal for non-zero types.
    Zero,
}
+
impl ParseIntError {
    /// Outputs the detailed cause of parsing an integer failing.
    #[unstable(
        feature = "int_error_matching",
        reason = "it can be useful to match errors when making error messages \
                  for integer parsing",
        issue = "22639"
    )]
    pub fn kind(&self) -> &IntErrorKind {
        &self.kind
    }
    #[unstable(
        feature = "int_error_internals",
        reason = "available through Error trait and this method should \
                  not be exposed publicly",
        issue = "none"
    )]
    #[doc(hidden)]
    // Maps each kind to its user-facing message. The match is kept
    // exhaustive (no `_` arm) so adding a new `IntErrorKind` variant
    // forces this table to be updated.
    pub fn __description(&self) -> &str {
        match self.kind {
            IntErrorKind::Empty => "cannot parse integer from empty string",
            IntErrorKind::InvalidDigit => "invalid digit found in string",
            IntErrorKind::PosOverflow => "number too large to fit in target type",
            IntErrorKind::NegOverflow => "number too small to fit in target type",
            IntErrorKind::Zero => "number would be zero for non-zero type",
        }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for ParseIntError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Display simply forwards the canonical per-kind message.
        self.__description().fmt(f)
    }
}
--- /dev/null
+//! This module provides constants which are specific to the implementation
+//! of the `f32` floating point data type.
+//!
+//! *[See also the `f32` primitive type](../../std/primitive.f32.html).*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::convert::FloatToInt;
+#[cfg(not(test))]
+use crate::intrinsics;
+use crate::mem;
+use crate::num::FpCategory;
+
// NOTE(review): the module-level constants below predate the associated
// constants on the `f32` primitive type (`f32::RADIX` etc., stable since
// 1.43 per the `assoc_int_consts` attributes further down this file) and
// are kept only for backwards compatibility. Each one re-exports the
// corresponding associated constant, and every doc example steers users
// toward the associated form.

/// The radix or base of the internal representation of `f32`.
/// Use [`f32::RADIX`](../../std/primitive.f32.html#associatedconstant.RADIX) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let r = std::f32::RADIX;
///
/// // intended way
/// let r = f32::RADIX;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const RADIX: u32 = f32::RADIX;

/// Number of significant digits in base 2.
/// Use [`f32::MANTISSA_DIGITS`](../../std/primitive.f32.html#associatedconstant.MANTISSA_DIGITS) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let d = std::f32::MANTISSA_DIGITS;
///
/// // intended way
/// let d = f32::MANTISSA_DIGITS;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MANTISSA_DIGITS: u32 = f32::MANTISSA_DIGITS;

/// Approximate number of significant digits in base 10.
/// Use [`f32::DIGITS`](../../std/primitive.f32.html#associatedconstant.DIGITS) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let d = std::f32::DIGITS;
///
/// // intended way
/// let d = f32::DIGITS;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const DIGITS: u32 = f32::DIGITS;

/// [Machine epsilon] value for `f32`.
/// Use [`f32::EPSILON`](../../std/primitive.f32.html#associatedconstant.EPSILON) instead.
///
/// This is the difference between `1.0` and the next larger representable number.
///
/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let e = std::f32::EPSILON;
///
/// // intended way
/// let e = f32::EPSILON;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const EPSILON: f32 = f32::EPSILON;

/// Smallest finite `f32` value.
/// Use [`f32::MIN`](../../std/primitive.f32.html#associatedconstant.MIN) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let min = std::f32::MIN;
///
/// // intended way
/// let min = f32::MIN;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = f32::MIN;

/// Smallest positive normal `f32` value.
/// Use [`f32::MIN_POSITIVE`](../../std/primitive.f32.html#associatedconstant.MIN_POSITIVE) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let min = std::f32::MIN_POSITIVE;
///
/// // intended way
/// let min = f32::MIN_POSITIVE;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = f32::MIN_POSITIVE;

/// Largest finite `f32` value.
/// Use [`f32::MAX`](../../std/primitive.f32.html#associatedconstant.MAX) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let max = std::f32::MAX;
///
/// // intended way
/// let max = f32::MAX;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = f32::MAX;

/// One greater than the minimum possible normal power of 2 exponent.
/// Use [`f32::MIN_EXP`](../../std/primitive.f32.html#associatedconstant.MIN_EXP) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let min = std::f32::MIN_EXP;
///
/// // intended way
/// let min = f32::MIN_EXP;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_EXP: i32 = f32::MIN_EXP;

/// Maximum possible power of 2 exponent.
/// Use [`f32::MAX_EXP`](../../std/primitive.f32.html#associatedconstant.MAX_EXP) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let max = std::f32::MAX_EXP;
///
/// // intended way
/// let max = f32::MAX_EXP;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_EXP: i32 = f32::MAX_EXP;

/// Minimum possible normal power of 10 exponent.
/// Use [`f32::MIN_10_EXP`](../../std/primitive.f32.html#associatedconstant.MIN_10_EXP) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let min = std::f32::MIN_10_EXP;
///
/// // intended way
/// let min = f32::MIN_10_EXP;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_10_EXP: i32 = f32::MIN_10_EXP;

/// Maximum possible power of 10 exponent.
/// Use [`f32::MAX_10_EXP`](../../std/primitive.f32.html#associatedconstant.MAX_10_EXP) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let max = std::f32::MAX_10_EXP;
///
/// // intended way
/// let max = f32::MAX_10_EXP;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_10_EXP: i32 = f32::MAX_10_EXP;

/// Not a Number (NaN).
/// Use [`f32::NAN`](../../std/primitive.f32.html#associatedconstant.NAN) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let nan = std::f32::NAN;
///
/// // intended way
/// let nan = f32::NAN;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const NAN: f32 = f32::NAN;

/// Infinity (∞).
/// Use [`f32::INFINITY`](../../std/primitive.f32.html#associatedconstant.INFINITY) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let inf = std::f32::INFINITY;
///
/// // intended way
/// let inf = f32::INFINITY;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const INFINITY: f32 = f32::INFINITY;

/// Negative infinity (−∞).
/// Use [`f32::NEG_INFINITY`](../../std/primitive.f32.html#associatedconstant.NEG_INFINITY) instead.
///
/// # Examples
///
/// ```rust
/// // deprecated way
/// let ninf = std::f32::NEG_INFINITY;
///
/// // intended way
/// let ninf = f32::NEG_INFINITY;
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub const NEG_INFINITY: f32 = f32::NEG_INFINITY;
+
/// Basic mathematical constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    // NOTE(review): each literal below is written with more decimal digits
    // than an `f32` can represent; the compiler rounds it to the nearest
    // representable `f32` at compile time, so the stored value is the best
    // possible `f32` approximation of the mathematical constant.

    /// Archimedes' constant (π)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const PI: f32 = 3.14159265358979323846264338327950288_f32;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[stable(feature = "tau_constant", since = "1.47.0")]
    pub const TAU: f32 = 6.28318530717958647692528676655900577_f32;

    /// π/2
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;

    /// π/3
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;

    /// π/4
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;

    /// π/6
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;

    /// π/8
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;

    /// 1/π
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;

    /// 2/π
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;

    /// 2/sqrt(π)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;

    /// sqrt(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;

    /// 1/sqrt(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;

    /// Euler's number (e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const E: f32 = 2.71828182845904523536028747135266250_f32;

    /// log<sub>2</sub>(e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;

    /// log<sub>2</sub>(10)
    #[stable(feature = "extra_log_consts", since = "1.43.0")]
    pub const LOG2_10: f32 = 3.32192809488736234787031942948939018_f32;

    /// log<sub>10</sub>(e)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;

    /// log<sub>10</sub>(2)
    #[stable(feature = "extra_log_consts", since = "1.43.0")]
    pub const LOG10_2: f32 = 0.301029995663981195213738894724493027_f32;

    /// ln(2)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;

    /// ln(10)
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
+
+#[lang = "f32"]
+#[cfg(not(test))]
+impl f32 {
    /// The radix or base of the internal representation of `f32`.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const RADIX: u32 = 2;

    /// Number of significant digits in base 2.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MANTISSA_DIGITS: u32 = 24;

    /// Approximate number of significant digits in base 10.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const DIGITS: u32 = 6;

    /// [Machine epsilon] value for `f32`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    // With MANTISSA_DIGITS = 24, this literal is the decimal spelling of 2^-23.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const EPSILON: f32 = 1.19209290e-07_f32;

    /// Smallest finite `f32` value.
    // Note: the literal is exactly the negation of MAX below, as IEEE 754's
    // symmetric sign bit implies.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MIN: f32 = -3.40282347e+38_f32;
    /// Smallest positive normal `f32` value.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
    /// Largest finite `f32` value.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MAX: f32 = 3.40282347e+38_f32;

    /// One greater than the minimum possible normal power of 2 exponent.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MIN_EXP: i32 = -125;
    /// Maximum possible power of 2 exponent.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MAX_EXP: i32 = 128;

    /// Minimum possible normal power of 10 exponent.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MIN_10_EXP: i32 = -37;
    /// Maximum possible power of 10 exponent.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const MAX_10_EXP: i32 = 38;

    /// Not a Number (NaN).
    // 0.0/0.0 is an invalid operation in IEEE 754 and evaluates to a NaN.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const NAN: f32 = 0.0_f32 / 0.0_f32;
    /// Infinity (∞).
    // A finite nonzero value divided by zero overflows to ±∞ in IEEE 754.
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const INFINITY: f32 = 1.0_f32 / 0.0_f32;
    /// Negative infinity (−∞).
    #[stable(feature = "assoc_int_consts", since = "1.43.0")]
    pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32;
+
+ /// Returns `true` if this value is `NaN`.
+ ///
+ /// ```
+ /// let nan = f32::NAN;
+ /// let f = 7.0_f32;
+ ///
+ /// assert!(nan.is_nan());
+ /// assert!(!f.is_nan());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_nan(self) -> bool {
+ self != self
+ }
+
+ // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // concerns about portability, so this implementation is for
+ // private use internally.
+ #[inline]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const fn abs_private(self) -> f32 {
+ f32::from_bits(self.to_bits() & 0x7fff_ffff)
+ }
+
+ /// Returns `true` if this value is positive infinity or negative infinity, and
+ /// `false` otherwise.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let inf = f32::INFINITY;
+ /// let neg_inf = f32::NEG_INFINITY;
+ /// let nan = f32::NAN;
+ ///
+ /// assert!(!f.is_infinite());
+ /// assert!(!nan.is_infinite());
+ ///
+ /// assert!(inf.is_infinite());
+ /// assert!(neg_inf.is_infinite());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_infinite(self) -> bool {
+ self.abs_private() == Self::INFINITY
+ }
+
+ /// Returns `true` if this number is neither infinite nor `NaN`.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let inf = f32::INFINITY;
+ /// let neg_inf = f32::NEG_INFINITY;
+ /// let nan = f32::NAN;
+ ///
+ /// assert!(f.is_finite());
+ ///
+ /// assert!(!nan.is_finite());
+ /// assert!(!inf.is_finite());
+ /// assert!(!neg_inf.is_finite());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_finite(self) -> bool {
+ // There's no need to handle NaN separately: if self is NaN,
+ // the comparison is not true, exactly as desired.
+ self.abs_private() < Self::INFINITY
+ }
+
+ /// Returns `true` if the number is neither zero, infinite,
+ /// [subnormal], or `NaN`.
+ ///
+ /// ```
+ /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32
+ /// let max = f32::MAX;
+ /// let lower_than_min = 1.0e-40_f32;
+ /// let zero = 0.0_f32;
+ ///
+ /// assert!(min.is_normal());
+ /// assert!(max.is_normal());
+ ///
+ /// assert!(!zero.is_normal());
+ /// assert!(!f32::NAN.is_normal());
+ /// assert!(!f32::INFINITY.is_normal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(!lower_than_min.is_normal());
+ /// ```
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_normal(self) -> bool {
+ matches!(self.classify(), FpCategory::Normal)
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// ```
+ /// use std::num::FpCategory;
+ ///
+ /// let num = 12.4_f32;
+ /// let inf = f32::INFINITY;
+ ///
+ /// assert_eq!(num.classify(), FpCategory::Normal);
+ /// assert_eq!(inf.classify(), FpCategory::Infinite);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub const fn classify(self) -> FpCategory {
+ const EXP_MASK: u32 = 0x7f800000;
+ const MAN_MASK: u32 = 0x007fffff;
+
+ let bits = self.to_bits();
+ match (bits & MAN_MASK, bits & EXP_MASK) {
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ (0, EXP_MASK) => FpCategory::Infinite,
+ (_, EXP_MASK) => FpCategory::Nan,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ /// Returns `true` if `self` has a positive sign, including `+0.0`, `NaN`s with
+ /// positive sign bit and positive infinity.
+ ///
+ /// ```
+ /// let f = 7.0_f32;
+ /// let g = -7.0_f32;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_positive(self) -> bool {
+ !self.is_sign_negative()
+ }
+
+ /// Returns `true` if `self` has a negative sign, including `-0.0`, `NaN`s with
+ /// negative sign bit and negative infinity.
+ ///
+ /// ```
+ /// let f = 7.0f32;
+ /// let g = -7.0f32;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_negative(self) -> bool {
+ // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
+ // applies to zeros and NaNs as well.
+ self.to_bits() & 0x8000_0000 != 0
+ }
+
+ /// Takes the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// let x = 2.0_f32;
+ /// let abs_difference = (x.recip() - (1.0 / x)).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn recip(self) -> f32 {
+ 1.0 / self
+ }
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// let angle = std::f32::consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[stable(feature = "f32_deg_rad_conversions", since = "1.7.0")]
+ #[inline]
+ pub fn to_degrees(self) -> f32 {
+ // Use a constant for better precision.
+ const PIS_IN_180: f32 = 57.2957795130823208767981548141051703_f32;
+ self * PIS_IN_180
+ }
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// let angle = 180.0f32;
+ ///
+ /// let abs_difference = (angle.to_radians() - std::f32::consts::PI).abs();
+ ///
+ /// assert!(abs_difference <= f32::EPSILON);
+ /// ```
+ #[stable(feature = "f32_deg_rad_conversions", since = "1.7.0")]
+ #[inline]
+ pub fn to_radians(self) -> f32 {
+ let value: f32 = consts::PI;
+ self * (value / 180.0f32)
+ }
+
+ /// Returns the maximum of the two numbers.
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn max(self, other: f32) -> f32 {
+ intrinsics::maxnumf32(self, other)
+ }
+
+ /// Returns the minimum of the two numbers.
+ ///
+ /// ```
+ /// let x = 1.0f32;
+ /// let y = 2.0f32;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn min(self, other: f32) -> f32 {
+ intrinsics::minnumf32(self, other)
+ }
+
+ /// Rounds toward zero and converts to any primitive integer type,
+ /// assuming that the value is finite and fits in that type.
+ ///
+ /// ```
+ /// let value = 4.6_f32;
+ /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
+ /// assert_eq!(rounded, 4);
+ ///
+ /// let value = -128.9_f32;
+ /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
+ /// assert_eq!(rounded, i8::MIN);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The value must:
+ ///
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type `Int`, after truncating off its fractional part
+ #[stable(feature = "float_approx_unchecked_to", since = "1.44.0")]
+ #[inline]
+ pub unsafe fn to_int_unchecked<Int>(self) -> Int
+ where
+ Self: FloatToInt<Int>,
+ {
+ // SAFETY: the caller must uphold the safety contract for
+ // `FloatToInt::to_int_unchecked`.
+ unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
+ }
+
+ /// Raw transmutation to `u32`.
+ ///
+ /// This is currently identical to `transmute::<f32, u32>(self)` on all platforms.
+ ///
+ /// See `from_bits` for some discussion of the portability of this operation
+ /// (there are almost no issues).
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_ne!((1f32).to_bits(), 1f32 as u32); // to_bits() is not casting!
+ /// assert_eq!((12.5f32).to_bits(), 0x41480000);
+ ///
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_bits(self) -> u32 {
+ // SAFETY: `u32` is a plain old datatype so we can always transmute to it
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Raw transmutation from `u32`.
+ ///
+ /// This is currently identical to `transmute::<u32, f32>(v)` on all platforms.
+ /// It turns out this is incredibly portable, for two reasons:
+ ///
+ /// * Floats and Ints have the same endianness on all supported platforms.
+ /// * IEEE-754 very precisely specifies the bit layout of floats.
+ ///
+ /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
+ /// (notably x86 and ARM) picked the interpretation that was ultimately
+ /// standardized in 2008, but some didn't (notably MIPS). As a result, all
+ /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
+ ///
+ /// Rather than trying to preserve signaling-ness cross-platform, this
+ /// implementation favors preserving the exact bits. This means that
+ /// any payloads encoded in NaNs will be preserved even if the result of
+ /// this method is sent over the network from an x86 machine to a MIPS one.
+ ///
+ /// If the results of this method are only manipulated by the same
+ /// architecture that produced them, then there is no portability concern.
+ ///
+ /// If the input isn't NaN, then there is no portability concern.
+ ///
+ /// If you don't care about signalingness (very likely), then there is no
+ /// portability concern.
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = f32::from_bits(0x41480000);
+ /// assert_eq!(v, 12.5);
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_bits(v: u32) -> Self {
+ // SAFETY: `u32` is a plain old datatype so we can always transmute from it
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ unsafe { mem::transmute(v) }
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_be_bytes();
+ /// assert_eq!(bytes, [0x41, 0x48, 0x00, 0x00]);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; 4] {
+ self.to_bits().to_be_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_le_bytes();
+ /// assert_eq!(bytes, [0x00, 0x00, 0x48, 0x41]);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; 4] {
+ self.to_bits().to_le_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: #method.to_be_bytes
+ /// [`to_le_bytes`]: #method.to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f32.to_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// [0x41, 0x48, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x48, 0x41]
+ /// }
+ /// );
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; 4] {
+ self.to_bits().to_ne_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// [`to_ne_bytes`] should be preferred over this whenever possible.
+ ///
+ /// [`to_ne_bytes`]: #method.to_ne_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(num_as_ne_bytes)]
+ /// let num = 12.5f32;
+ /// let bytes = num.as_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// &[0x41, 0x48, 0x00, 0x00]
+ /// } else {
+ /// &[0x00, 0x00, 0x48, 0x41]
+ /// }
+ /// );
+ /// ```
+ #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
+ #[inline]
+ pub fn as_ne_bytes(&self) -> &[u8; 4] {
+ // SAFETY: `f32` is a plain old datatype so we can always transmute to it
+ unsafe { &*(self as *const Self as *const _) }
+ }
+
+ /// Create a floating point value from its representation as a byte array in big endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_be_bytes([0x41, 0x48, 0x00, 0x00]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_be_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in little endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_le_bytes([0x00, 0x00, 0x48, 0x41]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_le_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in native endian.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+ /// appropriate instead.
+ ///
+ /// [`from_be_bytes`]: #method.from_be_bytes
+ /// [`from_le_bytes`]: #method.from_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f32::from_ne_bytes(if cfg!(target_endian = "big") {
+ /// [0x41, 0x48, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x48, 0x41]
+ /// });
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; 4]) -> Self {
+ Self::from_bits(u32::from_ne_bytes(bytes))
+ }
+
+ /// Returns an ordering between self and other values.
+ /// Unlike the standard partial comparison between floating point numbers,
+ /// this comparison always produces an ordering in accordance to
+ /// the totalOrder predicate as defined in IEEE 754 (2008 revision)
+ /// floating point standard. The values are ordered in following order:
+ /// - Negative quiet NaN
+ /// - Negative signaling NaN
+ /// - Negative infinity
+ /// - Negative numbers
+ /// - Negative subnormal numbers
+ /// - Negative zero
+ /// - Positive zero
+ /// - Positive subnormal numbers
+ /// - Positive numbers
+ /// - Positive infinity
+ /// - Positive signaling NaN
+ /// - Positive quiet NaN
+ ///
+ /// Note that this function does not always agree with the [`PartialOrd`]
+ /// and [`PartialEq`] implementations of `f32`. In particular, they regard
+ /// negative and positive zero as equal, while `total_cmp` doesn't.
+ ///
+ /// # Example
+ /// ```
+ /// #![feature(total_cmp)]
+ /// struct GoodBoy {
+ /// name: String,
+ /// weight: f32,
+ /// }
+ ///
+ /// let mut bois = vec![
+ /// GoodBoy { name: "Pucci".to_owned(), weight: 0.1 },
+ /// GoodBoy { name: "Woofer".to_owned(), weight: 99.0 },
+ /// GoodBoy { name: "Yapper".to_owned(), weight: 10.0 },
+ /// GoodBoy { name: "Chonk".to_owned(), weight: f32::INFINITY },
+ /// GoodBoy { name: "Abs. Unit".to_owned(), weight: f32::NAN },
+ /// GoodBoy { name: "Floaty".to_owned(), weight: -5.0 },
+ /// ];
+ ///
+ /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
+ /// # assert!(bois.into_iter().map(|b| b.weight)
+ /// # .zip([-5.0, 0.1, 10.0, 99.0, f32::INFINITY, f32::NAN].iter())
+ /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// ```
+ #[unstable(feature = "total_cmp", issue = "72599")]
+ #[inline]
+ pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
+ let mut left = self.to_bits() as i32;
+ let mut right = other.to_bits() as i32;
+
+ // In case of negatives, flip all the bits except the sign
+ // to achieve a similar layout as two's complement integers
+ //
+ // Why does this work? IEEE 754 floats consist of three fields:
+ // Sign bit, exponent and mantissa. The set of exponent and mantissa
+ // fields as a whole have the property that their bitwise order is
+ // equal to the numeric magnitude where the magnitude is defined.
+ // The magnitude is not normally defined on NaN values, but
+ // IEEE 754 totalOrder defines the NaN values also to follow the
+ // bitwise order. This leads to order explained in the doc comment.
+ // However, the representation of magnitude is the same for negative
+ // and positive numbers – only the sign bit is different.
+ // To easily compare the floats as signed integers, we need to
+ // flip the exponent and mantissa bits in case of negative numbers.
+ // We effectively convert the numbers to "two's complement" form.
+ //
+ // To do the flipping, we construct a mask and XOR against it.
+ // We branchlessly calculate an "all-ones except for the sign bit"
+ // mask from negative-signed values: right shifting sign-extends
+ // the integer, so we "fill" the mask with sign bits, and then
+ // convert to unsigned to push one more zero bit.
+ // On positive values, the mask is all zeros, so it's a no-op.
+ left ^= (((left >> 31) as u32) >> 1) as i32;
+ right ^= (((right >> 31) as u32) >> 1) as i32;
+
+ left.cmp(&right)
+ }
+}
--- /dev/null
+//! This module provides constants which are specific to the implementation
+//! of the `f64` floating point data type.
+//!
+//! *[See also the `f64` primitive type](../../std/primitive.f64.html).*
+//!
+//! Mathematically significant numbers are provided in the `consts` sub-module.
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::convert::FloatToInt;
+#[cfg(not(test))]
+use crate::intrinsics;
+use crate::mem;
+use crate::num::FpCategory;
+
+// NOTE: The module-level constants below predate the associated constants on
+// the primitive type (`f64::RADIX` etc.) and are kept only for backward
+// compatibility; each one simply forwards to the corresponding associated
+// constant, as the "deprecated way"/"intended way" examples show.
+
+/// The radix or base of the internal representation of `f64`.
+/// Use [`f64::RADIX`](../../std/primitive.f64.html#associatedconstant.RADIX) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let r = std::f64::RADIX;
+///
+/// // intended way
+/// let r = f64::RADIX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const RADIX: u32 = f64::RADIX;
+
+/// Number of significant digits in base 2.
+/// Use [`f64::MANTISSA_DIGITS`](../../std/primitive.f64.html#associatedconstant.MANTISSA_DIGITS) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let d = std::f64::MANTISSA_DIGITS;
+///
+/// // intended way
+/// let d = f64::MANTISSA_DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MANTISSA_DIGITS: u32 = f64::MANTISSA_DIGITS;
+
+/// Approximate number of significant digits in base 10.
+/// Use [`f64::DIGITS`](../../std/primitive.f64.html#associatedconstant.DIGITS) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let d = std::f64::DIGITS;
+///
+/// // intended way
+/// let d = f64::DIGITS;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const DIGITS: u32 = f64::DIGITS;
+
+/// [Machine epsilon] value for `f64`.
+/// Use [`f64::EPSILON`](../../std/primitive.f64.html#associatedconstant.EPSILON) instead.
+///
+/// This is the difference between `1.0` and the next larger representable number.
+///
+/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let e = std::f64::EPSILON;
+///
+/// // intended way
+/// let e = f64::EPSILON;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const EPSILON: f64 = f64::EPSILON;
+
+/// Smallest finite `f64` value.
+/// Use [`f64::MIN`](../../std/primitive.f64.html#associatedconstant.MIN) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let min = std::f64::MIN;
+///
+/// // intended way
+/// let min = f64::MIN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN: f64 = f64::MIN;
+
+/// Smallest positive normal `f64` value.
+/// Use [`f64::MIN_POSITIVE`](../../std/primitive.f64.html#associatedconstant.MIN_POSITIVE) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let min = std::f64::MIN_POSITIVE;
+///
+/// // intended way
+/// let min = f64::MIN_POSITIVE;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN_POSITIVE: f64 = f64::MIN_POSITIVE;
+
+/// Largest finite `f64` value.
+/// Use [`f64::MAX`](../../std/primitive.f64.html#associatedconstant.MAX) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let max = std::f64::MAX;
+///
+/// // intended way
+/// let max = f64::MAX;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX: f64 = f64::MAX;
+
+/// One greater than the minimum possible normal power of 2 exponent.
+/// Use [`f64::MIN_EXP`](../../std/primitive.f64.html#associatedconstant.MIN_EXP) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let min = std::f64::MIN_EXP;
+///
+/// // intended way
+/// let min = f64::MIN_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN_EXP: i32 = f64::MIN_EXP;
+
+/// Maximum possible power of 2 exponent.
+/// Use [`f64::MAX_EXP`](../../std/primitive.f64.html#associatedconstant.MAX_EXP) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let max = std::f64::MAX_EXP;
+///
+/// // intended way
+/// let max = f64::MAX_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX_EXP: i32 = f64::MAX_EXP;
+
+/// Minimum possible normal power of 10 exponent.
+/// Use [`f64::MIN_10_EXP`](../../std/primitive.f64.html#associatedconstant.MIN_10_EXP) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let min = std::f64::MIN_10_EXP;
+///
+/// // intended way
+/// let min = f64::MIN_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MIN_10_EXP: i32 = f64::MIN_10_EXP;
+
+/// Maximum possible power of 10 exponent.
+/// Use [`f64::MAX_10_EXP`](../../std/primitive.f64.html#associatedconstant.MAX_10_EXP) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let max = std::f64::MAX_10_EXP;
+///
+/// // intended way
+/// let max = f64::MAX_10_EXP;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const MAX_10_EXP: i32 = f64::MAX_10_EXP;
+
+/// Not a Number (NaN).
+/// Use [`f64::NAN`](../../std/primitive.f64.html#associatedconstant.NAN) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let nan = std::f64::NAN;
+///
+/// // intended way
+/// let nan = f64::NAN;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const NAN: f64 = f64::NAN;
+
+/// Infinity (∞).
+/// Use [`f64::INFINITY`](../../std/primitive.f64.html#associatedconstant.INFINITY) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let inf = std::f64::INFINITY;
+///
+/// // intended way
+/// let inf = f64::INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const INFINITY: f64 = f64::INFINITY;
+
+/// Negative infinity (−∞).
+/// Use [`f64::NEG_INFINITY`](../../std/primitive.f64.html#associatedconstant.NEG_INFINITY) instead.
+///
+/// # Examples
+///
+/// ```rust
+/// // deprecated way
+/// let ninf = std::f64::NEG_INFINITY;
+///
+/// // intended way
+/// let ninf = f64::NEG_INFINITY;
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub const NEG_INFINITY: f64 = f64::NEG_INFINITY;
+
+/// Basic mathematical constants.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod consts {
+    // FIXME: replace with mathematical constants from cmath.
+
+    // The literals below are written with more decimal digits than an `f64`
+    // can hold; the compiler rounds each to the nearest representable value.
+
+    /// Archimedes' constant (π)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const PI: f64 = 3.14159265358979323846264338327950288_f64;
+
+    /// The full circle constant (τ)
+    ///
+    /// Equal to 2π.
+    #[stable(feature = "tau_constant", since = "1.47.0")]
+    pub const TAU: f64 = 6.28318530717958647692528676655900577_f64;
+
+    /// π/2
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_PI_2: f64 = 1.57079632679489661923132169163975144_f64;
+
+    /// π/3
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_PI_3: f64 = 1.04719755119659774615421446109316763_f64;
+
+    /// π/4
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_PI_4: f64 = 0.785398163397448309615660845819875721_f64;
+
+    /// π/6
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_PI_6: f64 = 0.52359877559829887307710723054658381_f64;
+
+    /// π/8
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_PI_8: f64 = 0.39269908169872415480783042290993786_f64;
+
+    /// 1/π
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64;
+
+    /// 2/π
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_2_PI: f64 = 0.636619772367581343075535053490057448_f64;
+
+    /// 2/sqrt(π)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_2_SQRT_PI: f64 = 1.12837916709551257389615890312154517_f64;
+
+    /// sqrt(2)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const SQRT_2: f64 = 1.41421356237309504880168872420969808_f64;
+
+    /// 1/sqrt(2)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64;
+
+    /// Euler's number (e)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const E: f64 = 2.71828182845904523536028747135266250_f64;
+
+    /// log<sub>2</sub>(10)
+    #[stable(feature = "extra_log_consts", since = "1.43.0")]
+    pub const LOG2_10: f64 = 3.32192809488736234787031942948939018_f64;
+
+    /// log<sub>2</sub>(e)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const LOG2_E: f64 = 1.44269504088896340735992468100189214_f64;
+
+    /// log<sub>10</sub>(2)
+    #[stable(feature = "extra_log_consts", since = "1.43.0")]
+    pub const LOG10_2: f64 = 0.301029995663981195213738894724493027_f64;
+
+    /// log<sub>10</sub>(e)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const LOG10_E: f64 = 0.434294481903251827651128918916605082_f64;
+
+    /// ln(2)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const LN_2: f64 = 0.693147180559945309417232121458176568_f64;
+
+    /// ln(10)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const LN_10: f64 = 2.30258509299404568401799145468436421_f64;
+}
+
+#[lang = "f64"]
+#[cfg(not(test))]
+impl f64 {
+ /// The radix or base of the internal representation of `f64`.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const RADIX: u32 = 2;
+
+ /// Number of significant digits in base 2.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MANTISSA_DIGITS: u32 = 53;
+ /// Approximate number of significant digits in base 10.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const DIGITS: u32 = 15;
+
+ /// [Machine epsilon] value for `f64`.
+ ///
+ /// This is the difference between `1.0` and the next larger representable number.
+ ///
+ /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const EPSILON: f64 = 2.2204460492503131e-16_f64;
+
+ /// Smallest finite `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: f64 = -1.7976931348623157e+308_f64;
+ /// Smallest positive normal `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64;
+ /// Largest finite `f64` value.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: f64 = 1.7976931348623157e+308_f64;
+
+ /// One greater than the minimum possible normal power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_EXP: i32 = -1021;
+ /// Maximum possible power of 2 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_EXP: i32 = 1024;
+
+ /// Minimum possible normal power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN_10_EXP: i32 = -307;
+ /// Maximum possible power of 10 exponent.
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX_10_EXP: i32 = 308;
+
+ /// Not a Number (NaN).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NAN: f64 = 0.0_f64 / 0.0_f64;
+ /// Infinity (∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const INFINITY: f64 = 1.0_f64 / 0.0_f64;
+ /// Negative infinity (−∞).
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64;
+
+ /// Returns `true` if this value is `NaN`.
+ ///
+ /// ```
+ /// let nan = f64::NAN;
+ /// let f = 7.0_f64;
+ ///
+ /// assert!(nan.is_nan());
+ /// assert!(!f.is_nan());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_nan(self) -> bool {
+ self != self
+ }
+
+ // FIXME(#50145): `abs` is publicly unavailable in libcore due to
+ // concerns about portability, so this implementation is for
+ // private use internally.
+ #[inline]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ const fn abs_private(self) -> f64 {
+ f64::from_bits(self.to_bits() & 0x7fff_ffff_ffff_ffff)
+ }
+
+ /// Returns `true` if this value is positive infinity or negative infinity, and
+ /// `false` otherwise.
+ ///
+ /// ```
+ /// let f = 7.0f64;
+ /// let inf = f64::INFINITY;
+ /// let neg_inf = f64::NEG_INFINITY;
+ /// let nan = f64::NAN;
+ ///
+ /// assert!(!f.is_infinite());
+ /// assert!(!nan.is_infinite());
+ ///
+ /// assert!(inf.is_infinite());
+ /// assert!(neg_inf.is_infinite());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_infinite(self) -> bool {
+ self.abs_private() == Self::INFINITY
+ }
+
+ /// Returns `true` if this number is neither infinite nor `NaN`.
+ ///
+ /// ```
+ /// let f = 7.0f64;
+ /// let inf: f64 = f64::INFINITY;
+ /// let neg_inf: f64 = f64::NEG_INFINITY;
+ /// let nan: f64 = f64::NAN;
+ ///
+ /// assert!(f.is_finite());
+ ///
+ /// assert!(!nan.is_finite());
+ /// assert!(!inf.is_finite());
+ /// assert!(!neg_inf.is_finite());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_finite(self) -> bool {
+ // There's no need to handle NaN separately: if self is NaN,
+ // the comparison is not true, exactly as desired.
+ self.abs_private() < Self::INFINITY
+ }
+
+ /// Returns `true` if the number is neither zero, infinite,
+ /// [subnormal], nor `NaN`.
+ ///
+ /// ```
+ /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308f64
+ /// let max = f64::MAX;
+ /// let lower_than_min = 1.0e-308_f64;
+ /// let zero = 0.0f64;
+ ///
+ /// assert!(min.is_normal());
+ /// assert!(max.is_normal());
+ ///
+ /// assert!(!zero.is_normal());
+ /// assert!(!f64::NAN.is_normal());
+ /// assert!(!f64::INFINITY.is_normal());
+ /// // Values between `0` and `min` are Subnormal.
+ /// assert!(!lower_than_min.is_normal());
+ /// ```
+ ///
+ /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_normal(self) -> bool {
+ // Delegates to the full classification; only the `Normal` category
+ // excludes zero, subnormals, infinities and NaN all at once.
+ matches!(self.classify(), FpCategory::Normal)
+ }
+
+ /// Returns the floating point category of the number. If only one property
+ /// is going to be tested, it is generally faster to use the specific
+ /// predicate instead.
+ ///
+ /// ```
+ /// use std::num::FpCategory;
+ ///
+ /// let num = 12.4_f64;
+ /// let inf = f64::INFINITY;
+ ///
+ /// assert_eq!(num.classify(), FpCategory::Normal);
+ /// assert_eq!(inf.classify(), FpCategory::Infinite);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ pub const fn classify(self) -> FpCategory {
+ // Bits 52..=62: the 11-bit biased exponent field.
+ const EXP_MASK: u64 = 0x7ff0000000000000;
+ // Bits 0..=51: the 52-bit mantissa (fraction) field.
+ const MAN_MASK: u64 = 0x000fffffffffffff;
+
+ // Classify directly from the two bit fields (the sign bit is
+ // irrelevant to the category):
+ // exponent all-zero => zero (mantissa 0) or subnormal (mantissa != 0)
+ // exponent all-one => infinity (mantissa 0) or NaN (mantissa != 0)
+ // anything else => normal
+ let bits = self.to_bits();
+ match (bits & MAN_MASK, bits & EXP_MASK) {
+ (0, 0) => FpCategory::Zero,
+ (_, 0) => FpCategory::Subnormal,
+ (0, EXP_MASK) => FpCategory::Infinite,
+ (_, EXP_MASK) => FpCategory::Nan,
+ _ => FpCategory::Normal,
+ }
+ }
+
+ /// Returns `true` if `self` has a positive sign, including `+0.0`, `NaN`s with
+ /// positive sign bit and positive infinity.
+ ///
+ /// ```
+ /// let f = 7.0_f64;
+ /// let g = -7.0_f64;
+ ///
+ /// assert!(f.is_sign_positive());
+ /// assert!(!g.is_sign_positive());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_positive(self) -> bool {
+ // Exactly the complement of the sign-bit test in `is_sign_negative`.
+ !self.is_sign_negative()
+ }
+
+ // Deprecated, hidden alias kept only for backwards compatibility;
+ // forwards to `is_sign_positive`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.0.0", reason = "renamed to is_sign_positive")]
+ #[inline]
+ #[doc(hidden)]
+ pub fn is_positive(self) -> bool {
+ self.is_sign_positive()
+ }
+
+ /// Returns `true` if `self` has a negative sign, including `-0.0`, `NaN`s with
+ /// negative sign bit and negative infinity.
+ ///
+ /// ```
+ /// let f = 7.0_f64;
+ /// let g = -7.0_f64;
+ ///
+ /// assert!(!f.is_sign_negative());
+ /// assert!(g.is_sign_negative());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_float_classify", issue = "72505")]
+ #[inline]
+ pub const fn is_sign_negative(self) -> bool {
+ // Tests bit 63, the IEEE 754 sign bit, directly on the raw
+ // representation; this works for zeros, NaNs and infinities alike.
+ self.to_bits() & 0x8000_0000_0000_0000 != 0
+ }
+
+ // Deprecated, hidden alias kept only for backwards compatibility;
+ // forwards to `is_sign_negative`.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.0.0", reason = "renamed to is_sign_negative")]
+ #[inline]
+ #[doc(hidden)]
+ pub fn is_negative(self) -> bool {
+ self.is_sign_negative()
+ }
+
+ /// Takes the reciprocal (inverse) of a number, `1/x`.
+ ///
+ /// ```
+ /// let x = 2.0_f64;
+ /// let abs_difference = (x.recip() - (1.0 / x)).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn recip(self) -> f64 {
+ 1.0 / self
+ }
+
+ /// Converts radians to degrees.
+ ///
+ /// ```
+ /// let angle = std::f64::consts::PI;
+ ///
+ /// let abs_difference = (angle.to_degrees() - 180.0).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_degrees(self) -> f64 {
+ // The division here is correctly rounded with respect to the true
+ // value of 180/π. (This differs from f32, where a constant must be
+ // used to ensure a correctly rounded result.)
+ self * (180.0f64 / consts::PI)
+ }
+
+ /// Converts degrees to radians.
+ ///
+ /// ```
+ /// let angle = 180.0_f64;
+ ///
+ /// let abs_difference = (angle.to_radians() - std::f64::consts::PI).abs();
+ ///
+ /// assert!(abs_difference < 1e-10);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_radians(self) -> f64 {
+ // Scale by the radians-per-degree ratio π/180.
+ self * (consts::PI / 180.0)
+ }
+
+ /// Returns the maximum of the two numbers.
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.max(y), y);
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn max(self, other: f64) -> f64 {
+ // The NaN-ignoring behavior documented above is provided by the
+ // `maxnum` intrinsic, not implemented here.
+ intrinsics::maxnumf64(self, other)
+ }
+
+ /// Returns the minimum of the two numbers.
+ ///
+ /// ```
+ /// let x = 1.0_f64;
+ /// let y = 2.0_f64;
+ ///
+ /// assert_eq!(x.min(y), x);
+ /// ```
+ ///
+ /// If one of the arguments is NaN, then the other argument is returned.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn min(self, other: f64) -> f64 {
+ // The NaN-ignoring behavior documented above is provided by the
+ // `minnum` intrinsic, not implemented here.
+ intrinsics::minnumf64(self, other)
+ }
+
+ /// Rounds toward zero and converts to any primitive integer type,
+ /// assuming that the value is finite and fits in that type.
+ ///
+ /// ```
+ /// let value = 4.6_f64;
+ /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
+ /// assert_eq!(rounded, 4);
+ ///
+ /// let value = -128.9_f64;
+ /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
+ /// assert_eq!(rounded, i8::MIN);
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The value must:
+ ///
+ /// * Not be `NaN`
+ /// * Not be infinite
+ /// * Be representable in the return type `Int`, after truncating off its fractional part
+ #[stable(feature = "float_approx_unchecked_to", since = "1.44.0")]
+ #[inline]
+ pub unsafe fn to_int_unchecked<Int>(self) -> Int
+ where
+ Self: FloatToInt<Int>,
+ {
+ // Delegates to the `FloatToInt` impl for the requested integer type.
+ // SAFETY: the caller must uphold the safety contract for
+ // `FloatToInt::to_int_unchecked`.
+ unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
+ }
+
+ /// Raw transmutation to `u64`.
+ ///
+ /// This is currently identical to `transmute::<f64, u64>(self)` on all platforms.
+ ///
+ /// See `from_bits` for some discussion of the portability of this operation
+ /// (there are almost no issues).
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!((1f64).to_bits() != 1f64 as u64); // to_bits() is not casting!
+ /// assert_eq!((12.5f64).to_bits(), 0x4029000000000000);
+ ///
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_bits(self) -> u64 {
+ // SAFETY: `u64` is a plain old datatype so we can always transmute to it
+ unsafe { mem::transmute(self) }
+ }
+
+ /// Raw transmutation from `u64`.
+ ///
+ /// This is currently identical to `transmute::<u64, f64>(v)` on all platforms.
+ /// It turns out this is incredibly portable, for two reasons:
+ ///
+ /// * Floats and Ints have the same endianness on all supported platforms.
+ /// * IEEE-754 very precisely specifies the bit layout of floats.
+ ///
+ /// However there is one caveat: prior to the 2008 version of IEEE-754, how
+ /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
+ /// (notably x86 and ARM) picked the interpretation that was ultimately
+ /// standardized in 2008, but some didn't (notably MIPS). As a result, all
+ /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
+ ///
+ /// Rather than trying to preserve signaling-ness cross-platform, this
+ /// implementation favors preserving the exact bits. This means that
+ /// any payloads encoded in NaNs will be preserved even if the result of
+ /// this method is sent over the network from an x86 machine to a MIPS one.
+ ///
+ /// If the results of this method are only manipulated by the same
+ /// architecture that produced them, then there is no portability concern.
+ ///
+ /// If the input isn't NaN, then there is no portability concern.
+ ///
+ /// If you don't care about signaling-ness (very likely), then there is no
+ /// portability concern.
+ ///
+ /// Note that this function is distinct from `as` casting, which attempts to
+ /// preserve the *numeric* value, and not the bitwise value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = f64::from_bits(0x4029000000000000);
+ /// assert_eq!(v, 12.5);
+ /// ```
+ #[stable(feature = "float_bits_conv", since = "1.20.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_bits(v: u64) -> Self {
+ // SAFETY: `u64` is a plain old datatype so we can always transmute from it
+ // It turns out the safety issues with sNaN were overblown! Hooray!
+ unsafe { mem::transmute(v) }
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// big-endian (network) byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_be_bytes();
+ /// assert_eq!(bytes, [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; 8] {
+ // Reuse the integer byte-order machinery on the raw IEEE bit pattern.
+ self.to_bits().to_be_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_le_bytes();
+ /// assert_eq!(bytes, [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; 8] {
+ // Reuse the integer byte-order machinery on the raw IEEE bit pattern.
+ self.to_bits().to_le_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: #method.to_be_bytes
+ /// [`to_le_bytes`]: #method.to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let bytes = 12.5f64.to_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]
+ /// }
+ /// );
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; 8] {
+ // Reuse the integer byte-order machinery on the raw IEEE bit pattern.
+ self.to_bits().to_ne_bytes()
+ }
+
+ /// Return the memory representation of this floating point number as a byte array in
+ /// native byte order.
+ ///
+ /// [`to_ne_bytes`] should be preferred over this whenever possible.
+ ///
+ /// [`to_ne_bytes`]: #method.to_ne_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(num_as_ne_bytes)]
+ /// let num = 12.5f64;
+ /// let bytes = num.as_ne_bytes();
+ /// assert_eq!(
+ /// bytes,
+ /// if cfg!(target_endian = "big") {
+ /// &[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+ /// } else {
+ /// &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]
+ /// }
+ /// );
+ /// ```
+ #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
+ #[inline]
+ pub fn as_ne_bytes(&self) -> &[u8; 8] {
+ // Unlike `to_ne_bytes`, this borrows the value's storage in place
+ // instead of returning a copy.
+ // SAFETY: `f64` is plain old data, 8 bytes in size, so reinterpreting
+ // `&f64` as `&[u8; 8]` (whose alignment requirement is 1) is sound.
+ unsafe { &*(self as *const Self as *const _) }
+ }
+
+ /// Create a floating point value from its representation as a byte array in big endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_be_bytes([0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; 8]) -> Self {
+ // Decode the bytes as a big-endian `u64`, then reinterpret the bits.
+ Self::from_bits(u64::from_be_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in little endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_le_bytes([0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]);
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; 8]) -> Self {
+ // Decode the bytes as a little-endian `u64`, then reinterpret the bits.
+ Self::from_bits(u64::from_le_bytes(bytes))
+ }
+
+ /// Create a floating point value from its representation as a byte array in native endian.
+ ///
+ /// As the target platform's native endianness is used, portable code
+ /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+ /// appropriate instead.
+ ///
+ /// [`from_be_bytes`]: #method.from_be_bytes
+ /// [`from_le_bytes`]: #method.from_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let value = f64::from_ne_bytes(if cfg!(target_endian = "big") {
+ /// [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
+ /// } else {
+ /// [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40]
+ /// });
+ /// assert_eq!(value, 12.5);
+ /// ```
+ #[stable(feature = "float_to_from_bytes", since = "1.40.0")]
+ #[rustc_const_unstable(feature = "const_float_bits_conv", issue = "72447")]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; 8]) -> Self {
+ // Decode the bytes as a native-endian `u64`, then reinterpret the bits.
+ Self::from_bits(u64::from_ne_bytes(bytes))
+ }
+
+ /// Returns an ordering between self and other values.
+ /// Unlike the standard partial comparison between floating point numbers,
+ /// this comparison always produces an ordering in accordance to
+ /// the totalOrder predicate as defined in IEEE 754 (2008 revision)
+ /// floating point standard. The values are ordered in following order:
+ /// - Negative quiet NaN
+ /// - Negative signaling NaN
+ /// - Negative infinity
+ /// - Negative numbers
+ /// - Negative subnormal numbers
+ /// - Negative zero
+ /// - Positive zero
+ /// - Positive subnormal numbers
+ /// - Positive numbers
+ /// - Positive infinity
+ /// - Positive signaling NaN
+ /// - Positive quiet NaN
+ ///
+ /// Note that this function does not always agree with the [`PartialOrd`]
+ /// and [`PartialEq`] implementations of `f64`. In particular, they regard
+ /// negative and positive zero as equal, while `total_cmp` doesn't.
+ ///
+ /// # Example
+ /// ```
+ /// #![feature(total_cmp)]
+ /// struct GoodBoy {
+ /// name: String,
+ /// weight: f64,
+ /// }
+ ///
+ /// let mut bois = vec![
+ /// GoodBoy { name: "Pucci".to_owned(), weight: 0.1 },
+ /// GoodBoy { name: "Woofer".to_owned(), weight: 99.0 },
+ /// GoodBoy { name: "Yapper".to_owned(), weight: 10.0 },
+ /// GoodBoy { name: "Chonk".to_owned(), weight: f64::INFINITY },
+ /// GoodBoy { name: "Abs. Unit".to_owned(), weight: f64::NAN },
+ /// GoodBoy { name: "Floaty".to_owned(), weight: -5.0 },
+ /// ];
+ ///
+ /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
+ /// # assert!(bois.into_iter().map(|b| b.weight)
+ /// # .zip([-5.0, 0.1, 10.0, 99.0, f64::INFINITY, f64::NAN].iter())
+ /// # .all(|(a, b)| a.to_bits() == b.to_bits()))
+ /// ```
+ #[unstable(feature = "total_cmp", issue = "72599")]
+ #[inline]
+ pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
+ // The `as i64` casts preserve the bit patterns exactly; only their
+ // interpretation for comparison purposes changes below.
+ let mut left = self.to_bits() as i64;
+ let mut right = other.to_bits() as i64;
+
+ // In case of negatives, flip all the bits except the sign
+ // to achieve a similar layout as two's complement integers
+ //
+ // Why does this work? IEEE 754 floats consist of three fields:
+ // Sign bit, exponent and mantissa. The set of exponent and mantissa
+ // fields as a whole have the property that their bitwise order is
+ // equal to the numeric magnitude where the magnitude is defined.
+ // The magnitude is not normally defined on NaN values, but
+ // IEEE 754 totalOrder defines the NaN values also to follow the
+ // bitwise order. This leads to order explained in the doc comment.
+ // However, the representation of magnitude is the same for negative
+ // and positive numbers – only the sign bit is different.
+ // To easily compare the floats as signed integers, we need to
+ // flip the exponent and mantissa bits in case of negative numbers.
+ // We effectively convert the numbers to "two's complement" form.
+ //
+ // To do the flipping, we construct a mask and XOR against it.
+ // We branchlessly calculate an "all-ones except for the sign bit"
+ // mask from negative-signed values: right shifting sign-extends
+ // the integer, so we "fill" the mask with sign bits, and then
+ // convert to unsigned to push one more zero bit.
+ // On positive values, the mask is all zeros, so it's a no-op.
+ left ^= (((left >> 63) as u64) >> 1) as i64;
+ right ^= (((right >> 63) as u64) >> 1) as i64;
+
+ // After the transformation, ordinary signed integer comparison
+ // implements the totalOrder predicate.
+ left.cmp(&right)
+ }
+}
--- /dev/null
+//! Decodes a floating-point value into individual parts and error ranges.
+
+use crate::num::dec2flt::rawfp::RawFloat;
+use crate::num::FpCategory;
+
+ /// Decoded unsigned finite value, such that:
+ ///
+ /// - The original value equals to `mant * 2^exp`.
+ ///
+ /// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will
+ /// round to the original value. The range is inclusive only when
+ /// `inclusive` is `true`.
+ #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+ pub struct Decoded {
+ /// The scaled mantissa.
+ pub mant: u64,
+ /// The lower error range.
+ pub minus: u64,
+ /// The upper error range.
+ pub plus: u64,
+ /// The shared exponent in base 2.
+ ///
+ /// Applies to `mant`, `minus` and `plus` alike.
+ pub exp: i16,
+ /// True when the error range is inclusive.
+ ///
+ /// In IEEE 754, this is true when the original mantissa was even.
+ pub inclusive: bool,
+ }
+
+ /// Decoded unsigned value.
+ ///
+ /// The sign is stripped before decoding; see [`decode`] for the pairing.
+ #[derive(Copy, Clone, Debug, PartialEq, Eq)]
+ pub enum FullDecoded {
+ /// Not-a-number.
+ Nan,
+ /// Infinities, either positive or negative.
+ Infinite,
+ /// Zero, either positive or negative.
+ Zero,
+ /// Finite numbers with further decoded fields.
+ Finite(Decoded),
+ }
+
+ /// A floating point type which can be `decode`d.
+ pub trait DecodableFloat: RawFloat + Copy {
+ /// The minimum positive normalized value.
+ fn min_pos_norm_value() -> Self;
+ }
+
+ impl DecodableFloat for f32 {
+ fn min_pos_norm_value() -> Self {
+ // The smallest positive normal `f32` value.
+ f32::MIN_POSITIVE
+ }
+ }
+
+ impl DecodableFloat for f64 {
+ fn min_pos_norm_value() -> Self {
+ // The smallest positive normal `f64` value.
+ f64::MIN_POSITIVE
+ }
+ }
+
+ /// Returns a sign (true when negative) and `FullDecoded` value
+ /// from given floating point number.
+ pub fn decode<T: DecodableFloat>(v: T) -> (/*negative?*/ bool, FullDecoded) {
+ let (mant, exp, sign) = v.integer_decode();
+ // Per the `Decoded` docs, the error range is inclusive exactly when
+ // the original mantissa is even.
+ let even = (mant & 1) == 0;
+ let decoded = match v.classify() {
+ FpCategory::Nan => FullDecoded::Nan,
+ FpCategory::Infinite => FullDecoded::Infinite,
+ FpCategory::Zero => FullDecoded::Zero,
+ FpCategory::Subnormal => {
+ // neighbors: (mant - 2, exp) -- (mant, exp) -- (mant + 2, exp)
+ // Float::integer_decode always preserves the exponent,
+ // so the mantissa is scaled for subnormals.
+ FullDecoded::Finite(Decoded { mant, minus: 1, plus: 1, exp, inclusive: even })
+ }
+ FpCategory::Normal => {
+ let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode();
+ if mant == minnorm.0 {
+ // neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp)
+ // where maxmant = minnormmant * 2 - 1
+ // The gap to the next-smaller float is half the gap to the
+ // next-larger one, so scale by 4 (shift by 2) to keep both
+ // half-gap bounds (`minus: 1`, `plus: 2`) integral.
+ FullDecoded::Finite(Decoded {
+ mant: mant << 2,
+ minus: 1,
+ plus: 2,
+ exp: exp - 2,
+ inclusive: even,
+ })
+ } else {
+ // neighbors: (mant - 1, exp) -- (mant, exp) -- (mant + 1, exp)
+ // Symmetric gaps: scale by 2 (shift by 1) so that the
+ // half-gap bounds (`minus`/`plus` of 1) stay integral.
+ FullDecoded::Finite(Decoded {
+ mant: mant << 1,
+ minus: 1,
+ plus: 1,
+ exp: exp - 1,
+ inclusive: even,
+ })
+ }
+ }
+ };
+ (sign < 0, decoded)
+ }
--- /dev/null
+//! The exponent estimator.
+
+ /// Finds `k_0` such that `10^(k_0-1) < mant * 2^exp <= 10^(k_0+1)`.
+ ///
+ /// This is used to approximate `k = ceil(log_10 (mant * 2^exp))`;
+ /// the true `k` is either `k_0` or `k_0+1`.
+ #[doc(hidden)]
+ pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 {
+ // 2^(nbits-1) < mant <= 2^nbits if mant > 0
+ // NOTE(review): `mant - 1` wraps (and panics in debug builds) for
+ // `mant == 0` — callers are presumably expected to pass a nonzero
+ // mantissa; confirm at call sites.
+ let nbits = 64 - (mant - 1).leading_zeros() as i64;
+ // 1292913986 = floor(2^32 * log_10 2)
+ // therefore this always underestimates (or is exact), but not much.
+ (((nbits + exp as i64) * 1292913986) >> 32) as i16
+ }
--- /dev/null
+/*!
+
+Floating-point number to decimal conversion routines.
+
+# Problem statement
+
+We are given the floating-point number `v = f * 2^e` with an integer `f`,
+and its bounds `minus` and `plus` such that any number between `v - minus` and
+`v + plus` will be rounded to `v`. For the simplicity we assume that
+this range is exclusive. Then we would like to get the unique decimal
+representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero.
+
+- It's correctly rounded when parsed back: `v - minus < V < v + plus`.
+ Furthermore it is shortest such one, i.e., there is no representation
+ with less than `n` digits that is correctly rounded.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Note that
+ there might be two representations satisfying this uniqueness requirement,
+ in which case some tie-breaking mechanism is used.
+
+ We will refer to this mode of operation as the *shortest* mode. This mode is used
+ when there is no additional constraint, and can be thought of as a "natural" mode
+ as it matches the ordinary intuition (it at least prints `0.1f32` as "0.1").
+
+We have two more modes of operation closely related to each other. In these modes
+we are given either the number of significant digits `n` or the last-digit
+limitation `limit` (which determines the actual `n`), and we would like to get
+the representation `V = 0.d[0..n-1] * 10^k` such that:
+
+- `d[0]` is non-zero, unless `n` was zero in which case only `k` is returned.
+
+- It's closest to the original value: `abs(V - v) <= 10^(k-n) / 2`. Again,
+ there might be some tie-breaking mechanism.
+
+When `limit` is given but not `n`, we set `n` such that `k - n = limit`
+so that the last digit `d[n-1]` is scaled by `10^(k-n) = 10^limit`.
+If such `n` is negative, we clip it to zero so that we will only get `k`.
+We are also limited by the supplied buffer. This limitation is used to print
+the number up to given number of fractional digits without knowing
+the correct `k` beforehand.
+
+ We will refer to the mode of operation requiring `n` as the *exact* mode,
+ and the one requiring `limit` as the *fixed* mode. The exact mode is a subset of
+ the fixed mode: a sufficiently large last-digit limitation will eventually fill
+ the supplied buffer and let the algorithm return.
+
+# Implementation overview
+
+It is easy to get the floating point printing correct but slow (Russ Cox has
+[demonstrated](http://research.swtch.com/ftoa) how it's easy), or incorrect but
+fast (naïve division and modulo). But it is surprisingly hard to print
+floating point numbers correctly *and* efficiently.
+
+There are two classes of algorithms widely known to be correct.
+
+ - The "Dragon" family of algorithms was first described by Guy L. Steele Jr. and
+ Jon L. White. They rely on the fixed-size big integer for their correctness.
+ A slight improvement was found later, which is posthumously described by
+ Robert G. Burger and R. Kent Dybvig. David Gay's `dtoa.c` routine is
+ a popular implementation of this strategy.
+
+ - The "Grisu" family of algorithms was first described by Florian Loitsch.
+ They use a very cheap integer-only procedure to determine the close-to-correct
+ representation which is at least guaranteed to be shortest. The variant,
+ Grisu3, actively detects if the resulting representation is incorrect.
+
+We implement both algorithms with necessary tweaks to suit our requirements.
+In particular, published literatures are short of the actual implementation
+difficulties like how to avoid arithmetic overflows. Each implementation,
+available in `strategy::dragon` and `strategy::grisu` respectively,
+extensively describes all necessary justifications and many proofs for them.
+(It is still difficult to follow though. You have been warned.)
+
+Both implementations expose two public functions:
+
+- `format_shortest(decoded, buf)`, which always needs at least
+ `MAX_SIG_DIGITS` digits of buffer. Implements the shortest mode.
+
+- `format_exact(decoded, buf, limit)`, which accepts as small as
+ one digit of buffer. Implements exact and fixed modes.
+
+ They try to fill the `u8` buffer with digits and return the number of digits
+written and the exponent `k`. They are total for all finite `f32` and `f64`
+inputs (Grisu internally falls back to Dragon if necessary).
+
+The rendered digits are formatted into the actual string form with
+four functions:
+
+- `to_shortest_str` prints the shortest representation, which can be padded by
+ zeroes to make *at least* given number of fractional digits.
+
+- `to_shortest_exp_str` prints the shortest representation, which can be
+ padded by zeroes when its exponent is in the specified ranges,
+ or can be printed in the exponential form such as `1.23e45`.
+
+- `to_exact_exp_str` prints the exact representation with given number of
+ digits in the exponential form.
+
+- `to_exact_fixed_str` prints the fixed representation with *exactly*
+ given number of fractional digits.
+
+They all return a slice of preallocated `Part` array, which corresponds to
+the individual part of strings: a fixed string, a part of rendered digits,
+a number of zeroes or a small (`u16`) number. The caller is expected to
+provide a large enough buffer and `Part` array, and to assemble the final
+string from resulting `Part`s itself.
+
+All algorithms and formatting functions are accompanied by extensive tests
+in `coretests::num::flt2dec` module. It also shows how to use individual
+functions.
+
+*/
+
+// while this is extensively documented, this is in principle private which is
+// only made public for testing. do not expose us.
+#![doc(hidden)]
+#![unstable(
+ feature = "flt2dec",
+ reason = "internal routines only exposed for testing",
+ issue = "none"
+)]
+
+pub use self::decoder::{decode, DecodableFloat, Decoded, FullDecoded};
+
+use crate::mem::MaybeUninit;
+
+pub mod decoder;
+pub mod estimator;
+
+/// Digit-generation algorithms.
+pub mod strategy {
+ pub mod dragon;
+ pub mod grisu;
+}
+
+ /// The minimum size of buffer necessary for the shortest mode.
+ ///
+ /// It is a bit non-trivial to derive, but this is one plus the maximal number of
+ /// significant decimal digits from formatting algorithms with the shortest result.
+ /// The exact formula is `ceil(# bits in mantissa * log_10 2 + 1)`.
+ // For `f64` (53 mantissa bits): ceil(53 * log_10 2 + 1) = ceil(16.95...) = 17.
+ pub const MAX_SIG_DIGITS: usize = 17;
+
+ /// When `d` contains decimal digits, increase the last digit and propagate carry.
+ /// Returns a next digit when it causes the length to change.
+ ///
+ /// `d` is expected to contain ASCII decimal digits (`b'0'..=b'9'`); the
+ /// carry propagation rewrites trailing nines to zeros in place.
+ #[doc(hidden)]
+ pub fn round_up(d: &mut [u8]) -> Option<u8> {
+ // Find the rightmost digit that is not a nine; everything to its right
+ // carries over.
+ match d.iter().rposition(|&c| c != b'9') {
+ Some(i) => {
+ // d[i+1..n] is all nines
+ d[i] += 1;
+ for j in i + 1..d.len() {
+ d[j] = b'0';
+ }
+ None
+ }
+ None if !d.is_empty() => {
+ // 999..999 rounds to 1000..000 with an increased exponent
+ d[0] = b'1';
+ for j in 1..d.len() {
+ d[j] = b'0';
+ }
+ Some(b'0')
+ }
+ None => {
+ // an empty buffer rounds up (a bit strange but reasonable)
+ Some(b'1')
+ }
+ }
+ }
+
+ /// Formatted parts.
+ ///
+ /// A rendered number is represented as a sequence of these parts so that
+ /// zero runs and short numbers need not be materialized as bytes up front.
+ #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+ pub enum Part<'a> {
+ /// Given number of zero digits.
+ Zero(usize),
+ /// A literal number up to 5 digits.
+ Num(u16),
+ /// A verbatim copy of given bytes.
+ Copy(&'a [u8]),
+ }
+
+ impl<'a> Part<'a> {
+ /// Returns the exact byte length of given part.
+ pub fn len(&self) -> usize {
+ match *self {
+ Part::Zero(nzeroes) => nzeroes,
+ Part::Num(v) => {
+ // Count decimal digits of `v`; a `u16` has at most 5
+ // (max value 65535), hence the bound in the enum docs.
+ if v < 1_000 {
+ if v < 10 {
+ 1
+ } else if v < 100 {
+ 2
+ } else {
+ 3
+ }
+ } else {
+ if v < 10_000 { 4 } else { 5 }
+ }
+ }
+ Part::Copy(buf) => buf.len(),
+ }
+ }
+
+ /// Writes a part into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is not enough.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ let len = self.len();
+ if out.len() >= len {
+ match *self {
+ Part::Zero(nzeroes) => {
+ for c in &mut out[..nzeroes] {
+ *c = b'0';
+ }
+ }
+ Part::Num(mut v) => {
+ // Emit decimal digits least-significant first, filling
+ // the slice from the right.
+ for c in out[..len].iter_mut().rev() {
+ *c = b'0' + (v % 10) as u8;
+ v /= 10;
+ }
+ }
+ Part::Copy(buf) => {
+ out[..buf.len()].copy_from_slice(buf);
+ }
+ }
+ Some(len)
+ } else {
+ None
+ }
+ }
+ }
+
+ /// Formatted result containing one or more parts.
+ /// This can be written to the byte buffer or converted to the allocated string.
+ #[allow(missing_debug_implementations)]
+ #[derive(Clone)]
+ pub struct Formatted<'a> {
+ /// A byte slice representing a sign, either `""`, `"-"` or `"+"`.
+ pub sign: &'static str,
+ /// Formatted parts to be rendered after a sign and optional zero padding.
+ pub parts: &'a [Part<'a>],
+ }
+
+ impl<'a> Formatted<'a> {
+ /// Returns the exact byte length of combined formatted result.
+ pub fn len(&self) -> usize {
+ let mut len = self.sign.len();
+ for part in self.parts {
+ len += part.len();
+ }
+ len
+ }
+
+ /// Writes all formatted parts into the supplied buffer.
+ /// Returns the number of written bytes, or `None` if the buffer is not enough.
+ /// (It may still leave partially written bytes in the buffer; do not rely on that.)
+ pub fn write(&self, out: &mut [u8]) -> Option<usize> {
+ if out.len() < self.sign.len() {
+ return None;
+ }
+ out[..self.sign.len()].copy_from_slice(self.sign.as_bytes());
+
+ // Append each part after the sign; `?` bails out early if any part
+ // no longer fits in the remaining buffer.
+ let mut written = self.sign.len();
+ for part in self.parts {
+ let len = part.write(&mut out[written..])?;
+ written += len;
+ }
+ Some(written)
+ }
+ }
+
+ /// Formats given decimal digits `0.<...buf...> * 10^exp` into the decimal form
+ /// with at least given number of fractional digits. The result is stored to
+ /// the supplied parts array and a slice of written parts is returned.
+ ///
+ /// `frac_digits` can be less than the number of actual fractional digits in `buf`;
+ /// it will be ignored and full digits will be printed. It is only used to print
+ /// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
+ /// it will only print given digits and nothing else.
+ fn digits_to_dec_str<'a>(
+ buf: &'a [u8],
+ exp: i16,
+ frac_digits: usize,
+ parts: &'a mut [MaybeUninit<Part<'a>>],
+ ) -> &'a [Part<'a>] {
+ // The function emits at most 4 parts per branch below, hence the
+ // `parts.len() >= 4` requirement. The digits must be non-empty and
+ // free of a leading zero.
+ assert!(!buf.is_empty());
+ assert!(buf[0] > b'0');
+ assert!(parts.len() >= 4);
+
+ // if there is the restriction on the last digit position, `buf` is assumed to be
+ // left-padded with the virtual zeroes. the number of virtual zeroes, `nzeroes`,
+ // equals to `max(0, exp + frac_digits - buf.len())`, so that the position of
+ // the last digit `exp - buf.len() - nzeroes` is no more than `-frac_digits`:
+ //
+ // |<-virtual->|
+ // |<---- buf ---->| zeroes | exp
+ // 0. 1 2 3 4 5 6 7 8 9 _ _ _ _ _ _ x 10
+ // | | |
+ // 10^exp 10^(exp-buf.len()) 10^(exp-buf.len()-nzeroes)
+ //
+ // `nzeroes` is individually calculated for each case in order to avoid overflow.
+
+ if exp <= 0 {
+ // the decimal point is before rendered digits: [0.][000...000][1234][____]
+ // widening to `i32` before negating avoids overflow for `exp == i16::MIN`.
+ let minus_exp = -(exp as i32) as usize;
+ parts[0] = MaybeUninit::new(Part::Copy(b"0."));
+ parts[1] = MaybeUninit::new(Part::Zero(minus_exp));
+ parts[2] = MaybeUninit::new(Part::Copy(buf));
+ if frac_digits > buf.len() && frac_digits - buf.len() > minus_exp {
+ parts[3] = MaybeUninit::new(Part::Zero((frac_digits - buf.len()) - minus_exp));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..3`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) }
+ }
+ } else {
+ let exp = exp as usize;
+ if exp < buf.len() {
+ // the decimal point is inside rendered digits: [12][.][34][____]
+ parts[0] = MaybeUninit::new(Part::Copy(&buf[..exp]));
+ parts[1] = MaybeUninit::new(Part::Copy(b"."));
+ parts[2] = MaybeUninit::new(Part::Copy(&buf[exp..]));
+ if frac_digits > buf.len() - exp {
+ parts[3] = MaybeUninit::new(Part::Zero(frac_digits - (buf.len() - exp)));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..3`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) }
+ }
+ } else {
+ // the decimal point is after rendered digits: [1234][____0000] or [1234][__][.][__].
+ parts[0] = MaybeUninit::new(Part::Copy(buf));
+ parts[1] = MaybeUninit::new(Part::Zero(exp - buf.len()));
+ if frac_digits > 0 {
+ parts[2] = MaybeUninit::new(Part::Copy(b"."));
+ parts[3] = MaybeUninit::new(Part::Zero(frac_digits));
+ // SAFETY: we just initialized the elements `..4`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..4]) }
+ } else {
+ // SAFETY: we just initialized the elements `..2`.
+ unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) }
+ }
+ }
+ }
+ }
+
/// Formats the given decimal digits `0.<...buf...> * 10^exp` into the exponential
/// form with at least the given number of significant digits. When `upper` is `true`,
/// the exponent will be prefixed by `E`; otherwise that's `e`. The result is
/// stored to the supplied parts array and a slice of written parts is returned.
///
/// `min_ndigits` can be less than the number of actual significant digits in `buf`;
/// it will be ignored and full digits will be printed. It is only used to print
/// additional zeroes after rendered digits. Thus, `min_ndigits == 0` means that
/// it will only print the given digits and nothing else.
///
/// # Panics
///
/// Panics when `buf` is empty or does not start with a nonzero ASCII digit,
/// or when fewer than 6 `parts` are supplied (the worst case writes 6 parts).
fn digits_to_exp_str<'a>(
    buf: &'a [u8],
    exp: i16,
    min_ndigits: usize,
    upper: bool,
    parts: &'a mut [MaybeUninit<Part<'a>>],
) -> &'a [Part<'a>] {
    assert!(!buf.is_empty());
    assert!(buf[0] > b'0');
    assert!(parts.len() >= 6);

    let mut n = 0;

    // the first digit always goes before the decimal point.
    parts[n] = MaybeUninit::new(Part::Copy(&buf[..1]));
    n += 1;

    // a fractional part is emitted only when there is something to put there:
    // either remaining digits or requested zero padding.
    if buf.len() > 1 || min_ndigits > 1 {
        parts[n] = MaybeUninit::new(Part::Copy(b"."));
        parts[n + 1] = MaybeUninit::new(Part::Copy(&buf[1..]));
        n += 2;
        if min_ndigits > buf.len() {
            parts[n] = MaybeUninit::new(Part::Zero(min_ndigits - buf.len()));
            n += 1;
        }
    }

    // 0.1234 x 10^exp = 1.234 x 10^(exp-1)
    let exp = exp as i32 - 1; // avoid underflow when exp is i16::MIN
    if exp < 0 {
        parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E-" } else { b"e-" }));
        parts[n + 1] = MaybeUninit::new(Part::Num(-exp as u16));
    } else {
        parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E" } else { b"e" }));
        parts[n + 1] = MaybeUninit::new(Part::Num(exp as u16));
    }
    // SAFETY: we just initialized the elements `..n + 2`.
    unsafe { MaybeUninit::slice_assume_init_ref(&parts[..n + 2]) }
}
+
/// Sign formatting options.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Sign {
    // the trailing comments show how the sample values
    // -inf, -1, -0, +0, +1, +inf and nan are signed under each option.
    /// Prints `-` only for the negative non-zero values.
    Minus, // -inf -1 0 0 1 inf nan
    /// Prints `-` only for any negative values (including the negative zero).
    MinusRaw, // -inf -1 -0 0 1 inf nan
    /// Prints `-` for the negative non-zero values, or `+` otherwise.
    MinusPlus, // -inf -1 +0 +0 +1 +inf nan
    /// Prints `-` for any negative values (including the negative zero), or `+` otherwise.
    MinusPlusRaw, // -inf -1 -0 +0 +1 +inf nan
}
+
+/// Returns the static byte string corresponding to the sign to be formatted.
+/// It can be either `""`, `"+"` or `"-"`.
+fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static str {
+ match (*decoded, sign) {
+ (FullDecoded::Nan, _) => "",
+ (FullDecoded::Zero, Sign::Minus) => "",
+ (FullDecoded::Zero, Sign::MinusRaw) => {
+ if negative {
+ "-"
+ } else {
+ ""
+ }
+ }
+ (FullDecoded::Zero, Sign::MinusPlus) => "+",
+ (FullDecoded::Zero, Sign::MinusPlusRaw) => {
+ if negative {
+ "-"
+ } else {
+ "+"
+ }
+ }
+ (_, Sign::Minus | Sign::MinusRaw) => {
+ if negative {
+ "-"
+ } else {
+ ""
+ }
+ }
+ (_, Sign::MinusPlus | Sign::MinusPlusRaw) => {
+ if negative {
+ "-"
+ } else {
+ "+"
+ }
+ }
+ }
+}
+
/// Formats the given floating point number into the decimal form with at least
/// given number of fractional digits. The result is stored to the supplied parts
/// array while utilizing given byte buffer as a scratch. `upper` is currently
/// unused but left for the future decision to change the case of non-finite values,
/// i.e., `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// (which can be an empty string if no sign is rendered).
///
/// `format_shortest` should be the underlying digit-generation function.
/// It should return the part of the buffer that it initialized.
/// You probably would want `strategy::grisu::format_shortest` for this.
///
/// `frac_digits` can be less than the number of actual fractional digits in `v`;
/// it will be ignored and full digits will be printed. It is only used to print
/// additional zeroes after rendered digits. Thus `frac_digits` of 0 means that
/// it will only print given digits and nothing else.
///
/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
/// There should be at least 4 parts available, due to the worst case like
/// `[+][0.][0000][2][0000]` with `frac_digits = 10`.
pub fn to_shortest_str<'a, T, F>(
    mut format_shortest: F,
    v: T,
    sign: Sign,
    frac_digits: usize,
    buf: &'a mut [MaybeUninit<u8>],
    parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
    T: DecodableFloat,
    F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    assert!(parts.len() >= 4);
    assert!(buf.len() >= MAX_SIG_DIGITS);

    // resolve the sign first; everything below renders the magnitude only.
    let (negative, full_decoded) = decode(v);
    let sign = determine_sign(sign, &full_decoded, negative);
    match full_decoded {
        FullDecoded::Nan => {
            parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Infinite => {
            parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Zero => {
            // zero keeps the requested zero padding: `0.0000...` (or a bare `0`).
            if frac_digits > 0 {
                // [0.][0000]
                parts[0] = MaybeUninit::new(Part::Copy(b"0."));
                parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..2`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
                }
            } else {
                parts[0] = MaybeUninit::new(Part::Copy(b"0"));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..1`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
                }
            }
        }
        FullDecoded::Finite(ref decoded) => {
            // delegate to the digit generator and lay the digits out in decimal form.
            let (buf, exp) = format_shortest(decoded, buf);
            Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
        }
    }
}
+
/// Formats the given floating point number into the decimal form or
/// the exponential form, depending on the resulting exponent. The result is
/// stored to the supplied parts array while utilizing given byte buffer
/// as a scratch. `upper` is used to determine the case of non-finite values
/// (`inf` and `nan`) or the case of the exponent prefix (`e` or `E`).
/// The first part to be rendered is always a `Part::Sign` (which can be
/// an empty string if no sign is rendered).
///
/// `format_shortest` should be the underlying digit-generation function.
/// It should return the part of the buffer that it initialized.
/// You probably would want `strategy::grisu::format_shortest` for this.
///
/// The `dec_bounds` is a tuple `(lo, hi)` such that the number is formatted
/// as decimal only when `10^lo <= V < 10^hi`. Note that this is the *apparent* `V`
/// instead of the actual `v`! Thus any printed exponent in the exponential form
/// cannot be in this range, avoiding any confusion.
///
/// The byte buffer should be at least `MAX_SIG_DIGITS` bytes long.
/// There should be at least 6 parts available, due to the worst case like
/// `[+][1][.][2345][e][-][6]`.
pub fn to_shortest_exp_str<'a, T, F>(
    mut format_shortest: F,
    v: T,
    sign: Sign,
    dec_bounds: (i16, i16),
    upper: bool,
    buf: &'a mut [MaybeUninit<u8>],
    parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
    T: DecodableFloat,
    F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    assert!(parts.len() >= 6);
    assert!(buf.len() >= MAX_SIG_DIGITS);
    assert!(dec_bounds.0 <= dec_bounds.1);

    let (negative, full_decoded) = decode(v);
    let sign = determine_sign(sign, &full_decoded, negative);
    match full_decoded {
        FullDecoded::Nan => {
            parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Infinite => {
            parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Zero => {
            // zero is rendered as plain `0` only when an exponent of 0
            // falls inside `dec_bounds`; otherwise it gets the exponential form.
            parts[0] = if dec_bounds.0 <= 0 && 0 < dec_bounds.1 {
                MaybeUninit::new(Part::Copy(b"0"))
            } else {
                MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }))
            };
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Finite(ref decoded) => {
            let (buf, exp) = format_shortest(decoded, buf);
            // `vis_exp` is the user-visible exponent: `0.<buf> * 10^exp` equals
            // `<first digit>.<...> * 10^(exp - 1)`. widen to `i32` so that
            // `exp == i16::MIN` cannot overflow.
            let vis_exp = exp as i32 - 1;
            let parts = if dec_bounds.0 as i32 <= vis_exp && vis_exp < dec_bounds.1 as i32 {
                digits_to_dec_str(buf, exp, 0, parts)
            } else {
                digits_to_exp_str(buf, exp, 0, upper, parts)
            };
            Formatted { sign, parts }
        }
    }
}
+
/// Returns a rather crude approximation (upper bound) for the maximum buffer size
/// calculated from the given decoded exponent.
///
/// The exact limit is:
///
/// - when `exp < 0`, the maximum length is `ceil(log_10 (5^-exp * (2^64 - 1)))`.
/// - when `exp >= 0`, the maximum length is `ceil(log_10 (2^exp * (2^64 - 1)))`.
///
/// `ceil(log_10 (x^exp * (2^64 - 1)))` is less than `ceil(log_10 (2^64 - 1)) +
/// ceil(exp * log_10 x)`, which is in turn less than `20 + (1 + exp * log_10 x)`.
/// We use the facts that `log_10 2 < 5/16` and `log_10 5 < 12/16`, which is
/// enough for our purposes.
///
/// Why do we need this? `format_exact` functions will fill the entire buffer
/// unless limited by the last digit restriction, but it is possible that
/// the number of digits requested is ridiculously large (say, 30,000 digits).
/// The vast majority of buffer will be filled with zeroes, so we don't want to
/// allocate all the buffer beforehand. Consequently, for any given arguments,
/// 826 bytes of buffer should be sufficient for `f64`. Compare this with
/// the actual number for the worst case: 770 bytes (when `exp = -1074`).
fn estimate_max_buf_len(exp: i16) -> usize {
    // the log bounds above are applied with a fixed-point scale of 16:
    // `12/16` approximates `log_10 5` (negative exponents) and `5/16`
    // approximates `log_10 2` (non-negative exponents).
    let exp = exp as i32;
    let scaled = if exp < 0 { -12 * exp } else { 5 * exp };
    21 + ((scaled as usize) >> 4)
}
+
/// Formats given floating point number into the exponential form with
/// exactly given number of significant digits. The result is stored to
/// the supplied parts array while utilizing given byte buffer as a scratch.
/// `upper` is used to determine the case of the exponent prefix (`e` or `E`).
/// The first part to be rendered is always a `Part::Sign` (which can be
/// an empty string if no sign is rendered).
///
/// `format_exact` should be the underlying digit-generation function.
/// It should return the part of the buffer that it initialized.
/// You probably would want `strategy::grisu::format_exact` for this.
///
/// The byte buffer should be at least `ndigits` bytes long unless `ndigits` is
/// so large that only the fixed number of digits will be ever written.
/// (The tipping point for `f64` is about 800, so 1000 bytes should be enough.)
/// There should be at least 6 parts available, due to the worst case like
/// `[+][1][.][2345][e][-][6]`.
pub fn to_exact_exp_str<'a, T, F>(
    mut format_exact: F,
    v: T,
    sign: Sign,
    ndigits: usize,
    upper: bool,
    buf: &'a mut [MaybeUninit<u8>],
    parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
    T: DecodableFloat,
    F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
    assert!(parts.len() >= 6);
    assert!(ndigits > 0);

    let (negative, full_decoded) = decode(v);
    let sign = determine_sign(sign, &full_decoded, negative);
    match full_decoded {
        FullDecoded::Nan => {
            parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Infinite => {
            parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Zero => {
            // zero still honors the digit count: `0.000...0e0` (or just `0e0`).
            if ndigits > 1 {
                // [0.][0000][e0]
                parts[0] = MaybeUninit::new(Part::Copy(b"0."));
                parts[1] = MaybeUninit::new(Part::Zero(ndigits - 1));
                parts[2] = MaybeUninit::new(Part::Copy(if upper { b"E0" } else { b"e0" }));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..3`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..3]) },
                }
            } else {
                parts[0] = MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..1`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
                }
            }
        }
        FullDecoded::Finite(ref decoded) => {
            let maxlen = estimate_max_buf_len(decoded.exp);
            assert!(buf.len() >= ndigits || buf.len() >= maxlen);

            // generating more than `maxlen` digits is pointless (everything past
            // that point is zero), so cap the scratch; `digits_to_exp_str` then
            // zero-pads up to `ndigits`. `i16::MIN` disables the last-digit
            // limit, leaving the buffer length as the only stopping condition.
            let trunc = if ndigits < maxlen { ndigits } else { maxlen };
            let (buf, exp) = format_exact(decoded, &mut buf[..trunc], i16::MIN);
            Formatted { sign, parts: digits_to_exp_str(buf, exp, ndigits, upper, parts) }
        }
    }
}
+
/// Formats given floating point number into the decimal form with exactly
/// given number of fractional digits. The result is stored to the supplied parts
/// array while utilizing given byte buffer as a scratch. `upper` is currently
/// unused but left for the future decision to change the case of non-finite values,
/// i.e., `inf` and `nan`. The first part to be rendered is always a `Part::Sign`
/// (which can be an empty string if no sign is rendered).
///
/// `format_exact` should be the underlying digit-generation function.
/// It should return the part of the buffer that it initialized.
/// You probably would want `strategy::grisu::format_exact` for this.
///
/// The byte buffer should be enough for the output unless `frac_digits` is
/// so large that only the fixed number of digits will be ever written.
/// (The tipping point for `f64` is about 800, and 1000 bytes should be enough.)
/// There should be at least 4 parts available, due to the worst case like
/// `[+][0.][0000][2][0000]` with `frac_digits = 10`.
pub fn to_exact_fixed_str<'a, T, F>(
    mut format_exact: F,
    v: T,
    sign: Sign,
    frac_digits: usize,
    buf: &'a mut [MaybeUninit<u8>],
    parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
    T: DecodableFloat,
    F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
    assert!(parts.len() >= 4);

    let (negative, full_decoded) = decode(v);
    let sign = determine_sign(sign, &full_decoded, negative);
    match full_decoded {
        FullDecoded::Nan => {
            parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Infinite => {
            parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
            // SAFETY: we just initialized the elements `..1`.
            Formatted { sign, parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) } }
        }
        FullDecoded::Zero => {
            if frac_digits > 0 {
                // [0.][0000]
                parts[0] = MaybeUninit::new(Part::Copy(b"0."));
                parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..2`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
                }
            } else {
                parts[0] = MaybeUninit::new(Part::Copy(b"0"));
                Formatted {
                    sign,
                    // SAFETY: we just initialized the elements `..1`.
                    parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
                }
            }
        }
        FullDecoded::Finite(ref decoded) => {
            let maxlen = estimate_max_buf_len(decoded.exp);
            assert!(buf.len() >= maxlen);

            // it *is* possible that `frac_digits` is ridiculously large.
            // `format_exact` will end rendering digits much earlier in this case,
            // because we are strictly limited by `maxlen`.
            // (`frac_digits >= 0x8000` would not fit in `i16` after negation,
            // so it is clamped to the most permissive limit `i16::MIN`.)
            let limit = if frac_digits < 0x8000 { -(frac_digits as i16) } else { i16::MIN };
            let (buf, exp) = format_exact(decoded, &mut buf[..maxlen], limit);
            if exp <= limit {
                // the restriction couldn't be met, so this should render like zero no matter
                // `exp` was. this does not include the case that the restriction has been met
                // only after the final rounding-up; it's a regular case with `exp = limit + 1`.
                debug_assert_eq!(buf.len(), 0);
                if frac_digits > 0 {
                    // [0.][0000]
                    parts[0] = MaybeUninit::new(Part::Copy(b"0."));
                    parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
                    Formatted {
                        sign,
                        // SAFETY: we just initialized the elements `..2`.
                        parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..2]) },
                    }
                } else {
                    parts[0] = MaybeUninit::new(Part::Copy(b"0"));
                    Formatted {
                        sign,
                        // SAFETY: we just initialized the elements `..1`.
                        parts: unsafe { MaybeUninit::slice_assume_init_ref(&parts[..1]) },
                    }
                }
            } else {
                Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
            }
        }
    }
}
--- /dev/null
+//! Almost direct (but slightly optimized) Rust translation of Figure 3 of "Printing
+//! Floating-Point Numbers Quickly and Accurately"[^1].
+//!
+//! [^1]: Burger, R. G. and Dybvig, R. K. 1996. Printing floating-point numbers
+//! quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116.
+
+use crate::cmp::Ordering;
+use crate::mem::MaybeUninit;
+
+use crate::num::bignum::Big32x40 as Big;
+use crate::num::bignum::Digit32 as Digit;
+use crate::num::flt2dec::estimator::estimate_scaling_factor;
+use crate::num::flt2dec::{round_up, Decoded, MAX_SIG_DIGITS};
+
// powers of 10 up to 10^9, the largest power of 10 that fits in a 32-bit `Digit`
static POW10: [Digit; 10] =
    [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000];
// `2 * POW10[i]`; the divisors used by `div_2pow10`
static TWOPOW10: [Digit; 10] =
    [2, 20, 200, 2000, 20000, 200000, 2000000, 20000000, 200000000, 2000000000];

// precalculated arrays of `Digit`s for 10^(2^n)
// (stored as 32-bit limbs, least significant first)
static POW10TO16: [Digit; 2] = [0x6fc10000, 0x2386f2];
static POW10TO32: [Digit; 4] = [0, 0x85acef81, 0x2d6d415b, 0x4ee];
static POW10TO64: [Digit; 7] = [0, 0, 0xbf6a1f01, 0x6e38ed64, 0xdaa797ed, 0xe93ff9f4, 0x184f03];
static POW10TO128: [Digit; 14] = [
    0, 0, 0, 0, 0x2e953e01, 0x3df9909, 0xf1538fd, 0x2374e42f, 0xd3cff5ec, 0xc404dc08, 0xbccdb0da,
    0xa6337f19, 0xe91f2603, 0x24e,
];
static POW10TO256: [Digit; 27] = [
    0, 0, 0, 0, 0, 0, 0, 0, 0x982e7c01, 0xbed3875b, 0xd8d99f72, 0x12152f87, 0x6bde50c6, 0xcf4a6e70,
    0xd595d80f, 0x26b2716e, 0xadc666b0, 0x1d153624, 0x3c42d35a, 0x63ff540e, 0xcc5573c0, 0x65f9ef17,
    0x55bc28f2, 0x80dcc7f7, 0xf46eeddc, 0x5fdcefce, 0x553f7,
];
+
/// Multiplies `x` in place by `10^n` and returns it for chaining.
///
/// `n` is decomposed into its binary digits: the low three bits are handled
/// with a single `mul_small` via `POW10`, bit 3 via `POW10[8]`, and each
/// higher bit via the corresponding precalculated `10^(2^k)` bignum.
/// The bound `n < 512` is only checked in debug builds.
#[doc(hidden)]
pub fn mul_pow10(x: &mut Big, n: usize) -> &mut Big {
    debug_assert!(n < 512);
    if n & 7 != 0 {
        x.mul_small(POW10[n & 7]);
    }
    if n & 8 != 0 {
        x.mul_small(POW10[8]);
    }
    if n & 16 != 0 {
        x.mul_digits(&POW10TO16);
    }
    if n & 32 != 0 {
        x.mul_digits(&POW10TO32);
    }
    if n & 64 != 0 {
        x.mul_digits(&POW10TO64);
    }
    if n & 128 != 0 {
        x.mul_digits(&POW10TO128);
    }
    if n & 256 != 0 {
        x.mul_digits(&POW10TO256);
    }
    x
}
+
/// Divides `x` in place by `2 * 10^n` (the remainder is discarded)
/// and returns it for chaining.
fn div_2pow10(x: &mut Big, mut n: usize) -> &mut Big {
    let largest = POW10.len() - 1;
    // chip away `10^9` (the largest entry of `POW10`) at a time...
    while n > largest {
        x.div_rem_small(POW10[largest]);
        n -= largest;
    }
    // ...and fold the extra factor of two into the final division.
    x.div_rem_small(TWOPOW10[n]);
    x
}
+
// only usable when `x < 16 * scale`; `scaleN` should be `scale.mul_small(N)`
// computes the quotient digit `d = floor(x / scale)` by greedily subtracting
// the precomputed 8x, 4x, 2x and 1x multiples of `scale`, leaving the
// remainder in `x`.
fn div_rem_upto_16<'a>(
    x: &'a mut Big,
    scale: &Big,
    scale2: &Big,
    scale4: &Big,
    scale8: &Big,
) -> (u8, &'a mut Big) {
    let mut d = 0;
    if *x >= *scale8 {
        x.sub(scale8);
        d += 8;
    }
    if *x >= *scale4 {
        x.sub(scale4);
        d += 4;
    }
    if *x >= *scale2 {
        x.sub(scale2);
        d += 2;
    }
    if *x >= *scale {
        x.sub(scale);
        d += 1;
    }
    debug_assert!(*x < *scale);
    (d, x)
}
+
/// The shortest mode implementation for Dragon.
///
/// Returns the shortest unique digit string (as ASCII digits in `buf`) and the
/// decimal exponent `k`, together representing the value `0.<digits> * 10^k`.
pub fn format_shortest<'a>(
    d: &Decoded,
    buf: &'a mut [MaybeUninit<u8>],
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
    // the number `v` to format is known to be:
    // - equal to `mant * 2^exp`;
    // - preceded by `(mant - 2 * minus) * 2^exp` in the original type; and
    // - followed by `(mant + 2 * plus) * 2^exp` in the original type.
    //
    // obviously, `minus` and `plus` cannot be zero. (for infinities, we use out-of-range values.)
    // also we assume that at least one digit is generated, i.e., `mant` cannot be zero too.
    //
    // this also means that any number between `low = (mant - minus) * 2^exp` and
    // `high = (mant + plus) * 2^exp` will map to this exact floating point number,
    // with bounds included when the original mantissa was even (i.e., `!mant_was_odd`).

    assert!(d.mant > 0);
    assert!(d.minus > 0);
    assert!(d.plus > 0);
    assert!(d.mant.checked_add(d.plus).is_some());
    assert!(d.mant.checked_sub(d.minus).is_some());
    assert!(buf.len() >= MAX_SIG_DIGITS);

    // `a.cmp(&b) < rounding` is `if d.inclusive {a <= b} else {a < b}`
    let rounding = if d.inclusive { Ordering::Greater } else { Ordering::Equal };

    // estimate `k_0` from original inputs satisfying `10^(k_0-1) < high <= 10^(k_0+1)`.
    // the tight bound `k` satisfying `10^(k-1) < high <= 10^k` is calculated later.
    let mut k = estimate_scaling_factor(d.mant + d.plus, d.exp);

    // convert `{mant, plus, minus} * 2^exp` into the fractional form so that:
    // - `v = mant / scale`
    // - `low = (mant - minus) / scale`
    // - `high = (mant + plus) / scale`
    let mut mant = Big::from_u64(d.mant);
    let mut minus = Big::from_u64(d.minus);
    let mut plus = Big::from_u64(d.plus);
    let mut scale = Big::from_small(1);
    if d.exp < 0 {
        scale.mul_pow2(-d.exp as usize);
    } else {
        mant.mul_pow2(d.exp as usize);
        minus.mul_pow2(d.exp as usize);
        plus.mul_pow2(d.exp as usize);
    }

    // divide `mant` by `10^k`. now `scale / 10 < mant + plus <= scale * 10`.
    if k >= 0 {
        mul_pow10(&mut scale, k as usize);
    } else {
        mul_pow10(&mut mant, -k as usize);
        mul_pow10(&mut minus, -k as usize);
        mul_pow10(&mut plus, -k as usize);
    }

    // fixup when `mant + plus > scale` (or `>=`).
    // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
    // now `scale < mant + plus <= scale * 10` and we are ready to generate digits.
    //
    // note that `d[0]` *can* be zero, when `scale - plus < mant < scale`.
    // in this case rounding-up condition (`up` below) will be triggered immediately.
    if scale.cmp(mant.clone().add(&plus)) < rounding {
        // equivalent to scaling `scale` by 10
        k += 1;
    } else {
        mant.mul_small(10);
        minus.mul_small(10);
        plus.mul_small(10);
    }

    // cache `(2, 4, 8) * scale` for digit generation.
    let mut scale2 = scale.clone();
    scale2.mul_pow2(1);
    let mut scale4 = scale.clone();
    scale4.mul_pow2(2);
    let mut scale8 = scale.clone();
    scale8.mul_pow2(3);

    let mut down;
    let mut up;
    let mut i = 0;
    loop {
        // invariants, where `d[0..n-1]` are digits generated so far:
        // - `v = mant / scale * 10^(k-n-1) + d[0..n-1] * 10^(k-n)`
        // - `v - low = minus / scale * 10^(k-n-1)`
        // - `high - v = plus / scale * 10^(k-n-1)`
        // - `(mant + plus) / scale <= 10` (thus `mant / scale < 10`)
        // where `d[i..j]` is a shorthand for `d[i] * 10^(j-i) + ... + d[j-1] * 10 + d[j]`.

        // generate one digit: `d[n] = floor(mant / scale) < 10`.
        let (d, _) = div_rem_upto_16(&mut mant, &scale, &scale2, &scale4, &scale8);
        debug_assert!(d < 10);
        buf[i] = MaybeUninit::new(b'0' + d);
        i += 1;

        // this is a simplified description of the modified Dragon algorithm.
        // many intermediate derivations and completeness arguments are omitted for convenience.
        //
        // start with modified invariants, as we've updated `n`:
        // - `v = mant / scale * 10^(k-n) + d[0..n-1] * 10^(k-n)`
        // - `v - low = minus / scale * 10^(k-n)`
        // - `high - v = plus / scale * 10^(k-n)`
        //
        // assume that `d[0..n-1]` is the shortest representation between `low` and `high`,
        // i.e., `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
        // - `low < d[0..n-1] * 10^(k-n) < high` (bijectivity: digits round to `v`); and
        // - `abs(v / 10^(k-n) - d[0..n-1]) <= 1/2` (the last digit is correct).
        //
        // the second condition simplifies to `2 * mant <= scale`.
        // solving invariants in terms of `mant`, `low` and `high` yields
        // a simpler version of the first condition: `-plus < mant < minus`.
        // since `-plus < 0 <= mant`, we have the correct shortest representation
        // when `mant < minus` and `2 * mant <= scale`.
        // (the former becomes `mant <= minus` when the original mantissa is even.)
        //
        // when the second doesn't hold (`2 * mant > scale`), we need to increase the last digit.
        // this is enough for restoring that condition: we already know that
        // the digit generation guarantees `0 <= v / 10^(k-n) - d[0..n-1] < 1`.
        // in this case, the first condition becomes `-plus < mant - scale < minus`.
        // since `mant < scale` after the generation, we have `scale < mant + plus`.
        // (again, this becomes `scale <= mant + plus` when the original mantissa is even.)
        //
        // in short:
        // - stop and round `down` (keep digits as is) when `mant < minus` (or `<=`).
        // - stop and round `up` (increase the last digit) when `scale < mant + plus` (or `<=`).
        // - keep generating otherwise.
        down = mant.cmp(&minus) < rounding;
        up = scale.cmp(mant.clone().add(&plus)) < rounding;
        if down || up {
            break;
        } // we have the shortest representation, proceed to the rounding

        // restore the invariants.
        // this makes the algorithm always terminating: `minus` and `plus` always increases,
        // but `mant` is clipped modulo `scale` and `scale` is fixed.
        mant.mul_small(10);
        minus.mul_small(10);
        plus.mul_small(10);
    }

    // rounding up happens when
    // i) only the rounding-up condition was triggered, or
    // ii) both conditions were triggered and tie breaking prefers rounding up.
    // (the tie check rounds up when the remaining fraction `mant / scale` is
    // at least one half; note that `mul_pow2(1)` doubles `mant` in place.)
    if up && (!down || *mant.mul_pow2(1) >= scale) {
        // if rounding up changes the length, the exponent should also change.
        // it seems that this condition is very hard to satisfy (possibly impossible),
        // but we are just being safe and consistent here.
        // SAFETY: we initialized that memory above.
        if let Some(c) = round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) }) {
            buf[i] = MaybeUninit::new(c);
            i += 1;
            k += 1;
        }
    }

    // SAFETY: we initialized that memory above.
    (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..i]) }, k)
}
+
/// The exact and fixed mode implementation for Dragon.
///
/// Fills `buf` with the digits of `0.<digits> * 10^k` and returns them with
/// the decimal exponent `k`. The `limit` restricts the position of the last
/// digit: no digit below the decimal position `10^limit` is generated, and
/// a tie in the first dropped digit is rounded to even.
pub fn format_exact<'a>(
    d: &Decoded,
    buf: &'a mut [MaybeUninit<u8>],
    limit: i16,
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
    assert!(d.mant > 0);
    assert!(d.minus > 0);
    assert!(d.plus > 0);
    assert!(d.mant.checked_add(d.plus).is_some());
    assert!(d.mant.checked_sub(d.minus).is_some());

    // estimate `k_0` from original inputs satisfying `10^(k_0-1) < v <= 10^(k_0+1)`.
    let mut k = estimate_scaling_factor(d.mant, d.exp);

    // `v = mant / scale`.
    let mut mant = Big::from_u64(d.mant);
    let mut scale = Big::from_small(1);
    if d.exp < 0 {
        scale.mul_pow2(-d.exp as usize);
    } else {
        mant.mul_pow2(d.exp as usize);
    }

    // divide `mant` by `10^k`. now `scale / 10 < mant <= scale * 10`.
    if k >= 0 {
        mul_pow10(&mut scale, k as usize);
    } else {
        mul_pow10(&mut mant, -k as usize);
    }

    // fixup when `mant + plus >= scale`, where `plus / scale = 10^-buf.len() / 2`.
    // in order to keep the fixed-size bignum, we actually use `mant + floor(plus) >= scale`.
    // we are not actually modifying `scale`, since we can skip the initial multiplication instead.
    // again with the shortest algorithm, `d[0]` can be zero but will be eventually rounded up.
    if *div_2pow10(&mut scale.clone(), buf.len()).add(&mant) >= scale {
        // equivalent to scaling `scale` by 10
        k += 1;
    } else {
        mant.mul_small(10);
    }

    // if we are working with the last-digit limitation, we need to shorten the buffer
    // before the actual rendering in order to avoid double rounding.
    // note that we have to enlarge the buffer again when rounding up happens!
    let mut len = if k < limit {
        // oops, we cannot even produce *one* digit.
        // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
        // we return an empty buffer, with an exception of the later rounding-up case
        // which occurs when `k == limit` and has to produce exactly one digit.
        0
    } else if ((k as i32 - limit as i32) as usize) < buf.len() {
        // (widened to `i32` so the subtraction cannot overflow `i16`)
        (k - limit) as usize
    } else {
        buf.len()
    };

    if len > 0 {
        // cache `(2, 4, 8) * scale` for digit generation.
        // (this can be expensive, so do not calculate them when the buffer is empty.)
        let mut scale2 = scale.clone();
        scale2.mul_pow2(1);
        let mut scale4 = scale.clone();
        scale4.mul_pow2(2);
        let mut scale8 = scale.clone();
        scale8.mul_pow2(3);

        for i in 0..len {
            if mant.is_zero() {
                // following digits are all zeroes, we stop here
                // do *not* try to perform rounding! rather, fill remaining digits.
                for c in &mut buf[i..len] {
                    *c = MaybeUninit::new(b'0');
                }
                // SAFETY: we initialized that memory above.
                return (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, k);
            }

            // generate one digit `d = floor(mant / scale)` by greedy subtraction
            // (same technique as `div_rem_upto_16`, inlined here).
            let mut d = 0;
            if mant >= scale8 {
                mant.sub(&scale8);
                d += 8;
            }
            if mant >= scale4 {
                mant.sub(&scale4);
                d += 4;
            }
            if mant >= scale2 {
                mant.sub(&scale2);
                d += 2;
            }
            if mant >= scale {
                mant.sub(&scale);
                d += 1;
            }
            debug_assert!(mant < scale);
            debug_assert!(d < 10);
            buf[i] = MaybeUninit::new(b'0' + d);
            mant.mul_small(10);
        }
    }

    // rounding up if we stop in the middle of digits
    // if the following digits are exactly 5000..., check the prior digit and try to
    // round to even (i.e., avoid rounding up when the prior digit is even).
    let order = mant.cmp(scale.mul_small(5));
    if order == Ordering::Greater
        || (order == Ordering::Equal
            // SAFETY: `buf[len-1]` is initialized.
            && (len == 0 || unsafe { buf[len - 1].assume_init() } & 1 == 1))
    {
        // if rounding up changes the length, the exponent should also change.
        // but we've been requested a fixed number of digits, so do not alter the buffer...
        // SAFETY: we initialized that memory above.
        if let Some(c) = round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..len]) }) {
            // ...unless we've been requested the fixed precision instead.
            // we also need to check that, if the original buffer was empty,
            // the additional digit can only be added when `k == limit` (edge case).
            k += 1;
            if k > limit && len < buf.len() {
                buf[len] = MaybeUninit::new(c);
                len += 1;
            }
        }
    }

    // SAFETY: we initialized that memory above.
    (unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, k)
}
--- /dev/null
+//! Rust adaptation of the Grisu3 algorithm described in "Printing Floating-Point Numbers Quickly
+//! and Accurately with Integers"[^1]. It uses about 1KB of precomputed table, and in turn, it's
+//! very quick for most inputs.
+//!
+//! [^1]: Florian Loitsch. 2010. Printing floating-point numbers quickly and
+//! accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243.
+
+use crate::mem::MaybeUninit;
+use crate::num::diy_float::Fp;
+use crate::num::flt2dec::{round_up, Decoded, MAX_SIG_DIGITS};
+
// `ALPHA` and `GAMMA` bound the binary exponent of the scaled value produced by
// `cached_power`, which in turn keeps the integral part of the scaled number in
// `u32` while letting the fractional remainder be repeatedly multiplied by 10
// without overflow. see the comments in `format_shortest_opt` for the rationale;
// -60 and -32 form the maximal range under those constraints (V8 uses the same).
#[doc(hidden)]
pub const ALPHA: i16 = -60;
#[doc(hidden)]
pub const GAMMA: i16 = -32;
+
+/*
+# the following Python code generates this table:
+for i in xrange(-308, 333, 8):
+ if i >= 0: f = 10**i; e = 0
+ else: f = 2**(80-4*i) // 10**-i; e = 4 * i - 80
+ l = f.bit_length()
+ f = ((f << 64 >> (l-1)) + 1) >> 1; e += l - 64
+ print ' (%#018x, %5d, %4d),' % (f, e, i)
+*/
+
// cached powers of ten, generated by the Python snippet above.
// each entry `(f, e, k)` is a normalized 64-bit significand and binary exponent
// with `f * 2^e` approximating `10^k`; entries are spaced every 8 decimal
// exponents so that `cached_power` can pick a suitable one by interpolation.
#[doc(hidden)]
pub static CACHED_POW10: [(u64, i16, i16); 81] = [
    // (f, e, k)
    (0xe61acf033d1a45df, -1087, -308),
    (0xab70fe17c79ac6ca, -1060, -300),
    (0xff77b1fcbebcdc4f, -1034, -292),
    (0xbe5691ef416bd60c, -1007, -284),
    (0x8dd01fad907ffc3c, -980, -276),
    (0xd3515c2831559a83, -954, -268),
    (0x9d71ac8fada6c9b5, -927, -260),
    (0xea9c227723ee8bcb, -901, -252),
    (0xaecc49914078536d, -874, -244),
    (0x823c12795db6ce57, -847, -236),
    (0xc21094364dfb5637, -821, -228),
    (0x9096ea6f3848984f, -794, -220),
    (0xd77485cb25823ac7, -768, -212),
    (0xa086cfcd97bf97f4, -741, -204),
    (0xef340a98172aace5, -715, -196),
    (0xb23867fb2a35b28e, -688, -188),
    (0x84c8d4dfd2c63f3b, -661, -180),
    (0xc5dd44271ad3cdba, -635, -172),
    (0x936b9fcebb25c996, -608, -164),
    (0xdbac6c247d62a584, -582, -156),
    (0xa3ab66580d5fdaf6, -555, -148),
    (0xf3e2f893dec3f126, -529, -140),
    (0xb5b5ada8aaff80b8, -502, -132),
    (0x87625f056c7c4a8b, -475, -124),
    (0xc9bcff6034c13053, -449, -116),
    (0x964e858c91ba2655, -422, -108),
    (0xdff9772470297ebd, -396, -100),
    (0xa6dfbd9fb8e5b88f, -369, -92),
    (0xf8a95fcf88747d94, -343, -84),
    (0xb94470938fa89bcf, -316, -76),
    (0x8a08f0f8bf0f156b, -289, -68),
    (0xcdb02555653131b6, -263, -60),
    (0x993fe2c6d07b7fac, -236, -52),
    (0xe45c10c42a2b3b06, -210, -44),
    (0xaa242499697392d3, -183, -36),
    (0xfd87b5f28300ca0e, -157, -28),
    (0xbce5086492111aeb, -130, -20),
    (0x8cbccc096f5088cc, -103, -12),
    (0xd1b71758e219652c, -77, -4),
    (0x9c40000000000000, -50, 4),
    (0xe8d4a51000000000, -24, 12),
    (0xad78ebc5ac620000, 3, 20),
    (0x813f3978f8940984, 30, 28),
    (0xc097ce7bc90715b3, 56, 36),
    (0x8f7e32ce7bea5c70, 83, 44),
    (0xd5d238a4abe98068, 109, 52),
    (0x9f4f2726179a2245, 136, 60),
    (0xed63a231d4c4fb27, 162, 68),
    (0xb0de65388cc8ada8, 189, 76),
    (0x83c7088e1aab65db, 216, 84),
    (0xc45d1df942711d9a, 242, 92),
    (0x924d692ca61be758, 269, 100),
    (0xda01ee641a708dea, 295, 108),
    (0xa26da3999aef774a, 322, 116),
    (0xf209787bb47d6b85, 348, 124),
    (0xb454e4a179dd1877, 375, 132),
    (0x865b86925b9bc5c2, 402, 140),
    (0xc83553c5c8965d3d, 428, 148),
    (0x952ab45cfa97a0b3, 455, 156),
    (0xde469fbd99a05fe3, 481, 164),
    (0xa59bc234db398c25, 508, 172),
    (0xf6c69a72a3989f5c, 534, 180),
    (0xb7dcbf5354e9bece, 561, 188),
    (0x88fcf317f22241e2, 588, 196),
    (0xcc20ce9bd35c78a5, 614, 204),
    (0x98165af37b2153df, 641, 212),
    (0xe2a0b5dc971f303a, 667, 220),
    (0xa8d9d1535ce3b396, 694, 228),
    (0xfb9b7cd9a4a7443c, 720, 236),
    (0xbb764c4ca7a44410, 747, 244),
    (0x8bab8eefb6409c1a, 774, 252),
    (0xd01fef10a657842c, 800, 260),
    (0x9b10a4e5e9913129, 827, 268),
    (0xe7109bfba19c0c9d, 853, 276),
    (0xac2820d9623bf429, 880, 284),
    (0x80444b5e7aa7cf85, 907, 292),
    (0xbf21e44003acdd2d, 933, 300),
    (0x8e679c2f5e44ff8f, 960, 308),
    (0xd433179d9c8cb841, 986, 316),
    (0x9e19db92b4e31ba9, 1013, 324),
    (0xeb96bf6ebadf77d9, 1039, 332),
];

// binary exponents of the first and last table entries;
// `cached_power` uses these to map a requested exponent range onto an index.
#[doc(hidden)]
pub const CACHED_POW10_FIRST_E: i16 = -1087;
#[doc(hidden)]
pub const CACHED_POW10_LAST_E: i16 = 1039;
+
+#[doc(hidden)]
+pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) {
+ let offset = CACHED_POW10_FIRST_E as i32;
+ let range = (CACHED_POW10.len() as i32) - 1;
+ let domain = (CACHED_POW10_LAST_E - CACHED_POW10_FIRST_E) as i32;
+ let idx = ((gamma as i32) - offset) * range / domain;
+ let (f, e, k) = CACHED_POW10[idx as usize];
+ debug_assert!(alpha <= e && e <= gamma);
+ (k, Fp { f, e })
+}
+
/// Given `x > 0`, returns `(k, 10^k)` such that `10^k <= x < 10^(k+1)`.
#[doc(hidden)]
pub fn max_pow10_no_more_than(x: u32) -> (u8, u32) {
    debug_assert!(x > 0);

    // all powers of ten representable in `u32`, in ascending order.
    const POWERS: [u32; 10] = [
        1,
        10,
        100,
        1000,
        10_000,
        100_000,
        1_000_000,
        10_000_000,
        100_000_000,
        1_000_000_000,
    ];

    // walk down from the largest power until it no longer exceeds `x`.
    // `POWERS[0] == 1 <= x` for any valid input, so the scan stops at `k >= 0`.
    let mut k = POWERS.len() - 1;
    while k > 0 && POWERS[k] > x {
        k -= 1;
    }
    (k as u8, POWERS[k])
}
+
/// The shortest mode implementation for Grisu.
///
/// It returns `None` when it would return an inexact representation otherwise.
///
/// On success, returns the rendered significant digits (a prefix of `buf`)
/// together with the decimal exponent, in the digits-plus-exponent convention
/// used throughout `flt2dec`.
pub fn format_shortest_opt<'a>(
    d: &Decoded,
    buf: &'a mut [MaybeUninit<u8>],
) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
    // preconditions: strictly positive mantissa and ulp distances, no
    // overflow/underflow when forming `mant +- plus/minus`, a buffer able to
    // hold the worst-case shortest output, and three spare mantissa bits.
    assert!(d.mant > 0);
    assert!(d.minus > 0);
    assert!(d.plus > 0);
    assert!(d.mant.checked_add(d.plus).is_some());
    assert!(d.mant.checked_sub(d.minus).is_some());
    assert!(buf.len() >= MAX_SIG_DIGITS);
    assert!(d.mant + d.plus < (1 << 61)); // we need at least three bits of additional precision

    // start with the normalized values with the shared exponent
    let plus = Fp { f: d.mant + d.plus, e: d.exp }.normalize();
    let minus = Fp { f: d.mant - d.minus, e: d.exp }.normalize_to(plus.e);
    let v = Fp { f: d.mant, e: d.exp }.normalize_to(plus.e);

    // find any `cached = 10^minusk` such that `ALPHA <= minusk + plus.e + 64 <= GAMMA`.
    // since `plus` is normalized, this means `2^(62 + ALPHA) <= plus * cached < 2^(64 + GAMMA)`;
    // given our choices of `ALPHA` and `GAMMA`, this puts `plus * cached` into `[4, 2^32)`.
    //
    // it is obviously desirable to maximize `GAMMA - ALPHA`,
    // so that we don't need many cached powers of 10, but there are some considerations:
    //
    // 1. we want to keep `floor(plus * cached)` within `u32` since it needs a costly division.
    //    (this is not really avoidable, remainder is required for accuracy estimation.)
    // 2. the remainder of `floor(plus * cached)` repeatedly gets multiplied by 10,
    //    and it should not overflow.
    //
    // the first gives `64 + GAMMA <= 32`, while the second gives `10 * 2^-ALPHA <= 2^64`;
    // -60 and -32 is the maximal range with this constraint, and V8 also uses them.
    let (minusk, cached) = cached_power(ALPHA - plus.e - 64, GAMMA - plus.e - 64);

    // scale fps. this gives the maximal error of 1 ulp (proved from Theorem 5.1).
    let plus = plus.mul(&cached);
    let minus = minus.mul(&cached);
    let v = v.mul(&cached);
    debug_assert_eq!(plus.e, minus.e);
    debug_assert_eq!(plus.e, v.e);

    //         +- actual range of minus
    //   | <---|---------------------- unsafe region --------------------------> |
    //   |     |                                                                 |
    //   |  |<--->|  | <--------------- safe region ---------------> |  |        |
    //   |  |     |  |                                               |  |        |
    //   |1 ulp|1 ulp|                                               |1 ulp|1 ulp|
    //   |<--->|<--->|                                               |<--->|<--->|
    //   |-----|-----|-------...-------|-----|-----|-------...-------|-----|-----|
    //   |   minus   |                 |     v     |                 |   plus    |
    // minus1     minus0           v - 1 ulp   v + 1 ulp          plus0       plus1
    //
    // above `minus`, `v` and `plus` are *quantized* approximations (error < 1 ulp).
    // as we don't know the error is positive or negative, we use two approximations spaced equally
    // and have the maximal error of 2 ulps.
    //
    // the "unsafe region" is a liberal interval which we initially generate.
    // the "safe region" is a conservative interval which we only accept.
    // we start with the correct repr within the unsafe region, and try to find the closest repr
    // to `v` which is also within the safe region. if we can't, we give up.
    let plus1 = plus.f + 1;
    // let plus0 = plus.f - 1; // only for explanation
    // let minus0 = minus.f + 1; // only for explanation
    let minus1 = minus.f - 1;
    let e = -plus.e as usize; // shared exponent

    // divide `plus1` into integral and fractional parts.
    // integral parts are guaranteed to fit in u32, since cached power guarantees `plus < 2^32`
    // and normalized `plus.f` is always less than `2^64 - 2^4` due to the precision requirement.
    let plus1int = (plus1 >> e) as u32;
    let plus1frac = plus1 & ((1 << e) - 1);

    // calculate the largest `10^max_kappa` no more than `plus1` (thus `plus1 < 10^(max_kappa+1)`).
    // this is an upper bound of `kappa` below.
    let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(plus1int);

    let mut i = 0; // number of digits rendered into `buf` so far
    // the decimal exponent of the eventual output; `minusk` undoes the `10^-k` scaling.
    let exp = max_kappa as i16 - minusk + 1;

    // Theorem 6.2: if `k` is the greatest integer s.t. `0 <= y mod 10^k <= y - x`,
    // then `V = floor(y / 10^k) * 10^k` is in `[x, y]` and one of the shortest
    // representations (with the minimal number of significant digits) in that range.
    //
    // find the digit length `kappa` between `(minus1, plus1)` as per Theorem 6.2.
    // Theorem 6.2 can be adopted to exclude `x` by requiring `y mod 10^k < y - x` instead.
    // (e.g., `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777` is *not*
    // strictly less than `y - x = 777`, so `k = 3` is rejected.)
    // the algorithm relies on the later verification phase to exclude `y`.
    let delta1 = plus1 - minus1;
    // let delta1int = (delta1 >> e) as usize; // only for explanation
    let delta1frac = delta1 & ((1 << e) - 1);

    // render integral parts, while checking for the accuracy at each step.
    let mut kappa = max_kappa as i16;
    let mut ten_kappa = max_ten_kappa; // 10^kappa
    let mut remainder = plus1int; // digits yet to be rendered
    loop {
        // we always have at least one digit to render, as `plus1 >= 10^kappa`
        // invariants:
        // - `delta1int <= remainder < 10^(kappa+1)`
        // - `plus1int = d[0..n-1] * 10^(kappa+1) + remainder`
        //   (it follows that `remainder = plus1int % 10^(kappa+1)`)

        // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
        let q = remainder / ten_kappa;
        let r = remainder % ten_kappa;
        debug_assert!(q < 10);
        buf[i] = MaybeUninit::new(b'0' + q as u8);
        i += 1;

        let plus1rem = ((r as u64) << e) + plus1frac; // == (plus1 % 10^kappa) * 2^e
        if plus1rem < delta1 {
            // `plus1 % 10^kappa < delta1 = plus1 - minus1`; we've found the correct `kappa`.
            let ten_kappa = (ten_kappa as u64) << e; // scale 10^kappa back to the shared exponent
            return round_and_weed(
                // SAFETY: we initialized that memory above.
                unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) },
                exp,
                plus1rem,
                delta1,
                plus1 - v.f,
                ten_kappa,
                1,
            );
        }

        // break the loop when we have rendered all integral digits.
        // the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
        if i > max_kappa as usize {
            debug_assert_eq!(ten_kappa, 1);
            debug_assert_eq!(kappa, 0);
            break;
        }

        // restore invariants
        kappa -= 1;
        ten_kappa /= 10;
        remainder = r;
    }

    // render fractional parts, while checking for the accuracy at each step.
    // this time we rely on repeated multiplications, as division will lose the precision.
    let mut remainder = plus1frac;
    let mut threshold = delta1frac;
    let mut ulp = 1;
    loop {
        // the next digit should be significant as we've tested that before breaking out
        // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
        // - `remainder < 2^e`
        // - `plus1frac * 10^(n-m) = d[m..n-1] * 2^e + remainder`

        remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
        threshold *= 10;
        ulp *= 10;

        // divide `remainder` by `10^kappa`.
        // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
        let q = remainder >> e;
        let r = remainder & ((1 << e) - 1);
        debug_assert!(q < 10);
        buf[i] = MaybeUninit::new(b'0' + q as u8);
        i += 1;

        if r < threshold {
            let ten_kappa = 1 << e; // implicit divisor
            return round_and_weed(
                // SAFETY: we initialized that memory above.
                unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..i]) },
                exp,
                r,
                threshold,
                (plus1 - v.f) * ulp,
                ten_kappa,
                ulp,
            );
        }

        // restore invariants
        kappa -= 1;
        remainder = r;
    }

    // we've generated all significant digits of `plus1`, but not sure if it's the optimal one.
    // for example, if `minus1` is 3.14153... and `plus1` is 3.14158..., there are 5 different
    // shortest representation from 3.14154 to 3.14158 but we only have the greatest one.
    // we have to successively decrease the last digit and check if this is the optimal repr.
    // there are at most 9 candidates (..1 to ..9), so this is fairly quick. ("rounding" phase)
    //
    // the function checks if this "optimal" repr is actually within the ulp ranges,
    // and also, it is possible that the "second-to-optimal" repr can actually be optimal
    // due to the rounding error. in either cases this returns `None`. ("weeding" phase)
    //
    // all arguments here are scaled by the common (but implicit) value `k`, so that:
    // - `remainder = (plus1 % 10^kappa) * k`
    // - `threshold = (plus1 - minus1) * k` (and also, `remainder < threshold`)
    // - `plus1v = (plus1 - v) * k` (and also, `threshold > plus1v` from prior invariants)
    // - `ten_kappa = 10^kappa * k`
    // - `ulp = 2^-e * k`
    fn round_and_weed(
        buf: &mut [u8],
        exp: i16,
        remainder: u64,
        threshold: u64,
        plus1v: u64,
        ten_kappa: u64,
        ulp: u64,
    ) -> Option<(&[u8], i16)> {
        assert!(!buf.is_empty());

        // produce two approximations to `v` (actually `plus1 - v`) within 1.5 ulps.
        // the resulting representation should be the closest representation to both.
        //
        // here `plus1 - v` is used since calculations are done with respect to `plus1`
        // in order to avoid overflow/underflow (hence the seemingly swapped names).
        let plus1v_down = plus1v + ulp; // plus1 - (v - 1 ulp)
        let plus1v_up = plus1v - ulp; // plus1 - (v + 1 ulp)

        // decrease the last digit and stop at the closest representation to `v + 1 ulp`.
        let mut plus1w = remainder; // plus1w(n) = plus1 - w(n)
        {
            let last = buf.last_mut().unwrap();

            // we work with the approximated digits `w(n)`, which is initially equal to `plus1 -
            // plus1 % 10^kappa`. after running the loop body `n` times, `w(n) = plus1 -
            // plus1 % 10^kappa - n * 10^kappa`. we set `plus1w(n) = plus1 - w(n) =
            // plus1 % 10^kappa + n * 10^kappa` (thus `remainder = plus1w(0)`) to simplify checks.
            // note that `plus1w(n)` is always increasing.
            //
            // we have three conditions to terminate. any of them will make the loop unable to
            // proceed, but we then have at least one valid representation known to be closest to
            // `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
            //
            // TC1: `w(n) <= v + 1 ulp`, i.e., this is the last repr that can be the closest one.
            // this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
            // combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
            // overflow on the calculation of `plus1w(n)`.
            //
            // TC2: `w(n+1) < minus1`, i.e., the next repr definitely does not round to `v`.
            // this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
            // plus1 - minus1 = threshold`. the left hand side can overflow, but we know
            // `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
            // threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
            // `threshold - plus1w(n) < 10^kappa` instead.
            //
            // TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e., the next repr is
            // no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
            // this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
            // `z(n) > 0`. we have two cases to consider:
            //
            // - when `z(n+1) >= 0`: TC3 becomes `z(n) <= z(n+1)`. as `plus1w(n)` is increasing,
            //   `z(n)` should be decreasing and this is clearly false.
            // - when `z(n+1) < 0`:
            //   - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
            //     false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
            //   - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e., `plus1v_up - plus1w(n) >=
            //     plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
            //     gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
            //     combined with TC3a.
            //
            // consequently, we should stop when `TC1 || TC2 || (TC3a && TC3b)`. the following is
            // equal to its inverse, `!TC1 && !TC2 && (!TC3a || !TC3b)`.
            while plus1w < plus1v_up
                && threshold - plus1w >= ten_kappa
                && (plus1w + ten_kappa < plus1v_up
                    || plus1v_up - plus1w >= plus1w + ten_kappa - plus1v_up)
            {
                *last -= 1;
                debug_assert!(*last > b'0'); // the shortest repr cannot end with `0`
                plus1w += ten_kappa;
            }
        }

        // check if this representation is also the closest representation to `v - 1 ulp`.
        //
        // this is simply same to the terminating conditions for `v + 1 ulp`, with all `plus1v_up`
        // replaced by `plus1v_down` instead. overflow analysis equally holds.
        if plus1w < plus1v_down
            && threshold - plus1w >= ten_kappa
            && (plus1w + ten_kappa < plus1v_down
                || plus1v_down - plus1w >= plus1w + ten_kappa - plus1v_down)
        {
            return None;
        }

        // now we have the closest representation to `v` between `plus1` and `minus1`.
        // this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
        // i.e., `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
        // that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
        if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp { Some((buf, exp)) } else { None }
    }
}
+
+/// The shortest mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
+pub fn format_shortest<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ use crate::num::flt2dec::strategy::dragon::format_shortest as fallback;
+ // SAFETY: The borrow checker is not smart enough to let us use `buf`
+ // in the second branch, so we launder the lifetime here. But we only re-use
+ // `buf` if `format_shortest_opt` returned `None` so this is okay.
+ match format_shortest_opt(d, unsafe { &mut *(buf as *mut _) }) {
+ Some(ret) => ret,
+ None => fallback(d, buf),
+ }
+}
+
/// The exact and fixed mode implementation for Grisu.
///
/// It returns `None` when it would return an inexact representation otherwise.
///
/// `limit` is the lowest decimal exponent to render: at most `exp - limit`
/// digits are produced (clamped to the buffer size), where `exp` is the
/// decimal exponent of the value.
pub fn format_exact_opt<'a>(
    d: &Decoded,
    buf: &'a mut [MaybeUninit<u8>],
    limit: i16,
) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
    // preconditions: a positive mantissa with three spare bits for scaling,
    // and a non-empty output buffer.
    assert!(d.mant > 0);
    assert!(d.mant < (1 << 61)); // we need at least three bits of additional precision
    assert!(!buf.is_empty());

    // normalize and scale `v`.
    let v = Fp { f: d.mant, e: d.exp }.normalize();
    let (minusk, cached) = cached_power(ALPHA - v.e - 64, GAMMA - v.e - 64);
    let v = v.mul(&cached);

    // divide `v` into integral and fractional parts.
    let e = -v.e as usize;
    let vint = (v.f >> e) as u32;
    let vfrac = v.f & ((1 << e) - 1);

    // both old `v` and new `v` (scaled by `10^-k`) has an error of < 1 ulp (Theorem 5.1).
    // as we don't know the error is positive or negative, we use two approximations
    // spaced equally and have the maximal error of 2 ulps (same to the shortest case).
    //
    // the goal is to find the exactly rounded series of digits that are common to
    // both `v - 1 ulp` and `v + 1 ulp`, so that we are maximally confident.
    // if this is not possible, we don't know which one is the correct output for `v`,
    // so we give up and fall back.
    //
    // `err` is defined as `1 ulp * 2^e` here (same to the ulp in `vfrac`),
    // and we will scale it whenever `v` gets scaled.
    let mut err = 1;

    // calculate the largest `10^max_kappa` no more than `v` (thus `v < 10^(max_kappa+1)`).
    // this is an upper bound of `kappa` below.
    let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(vint);

    let mut i = 0; // number of digits rendered into `buf` so far
    // the decimal exponent of the eventual output; `minusk` undoes the `10^-k` scaling.
    let exp = max_kappa as i16 - minusk + 1;

    // if we are working with the last-digit limitation, we need to shorten the buffer
    // before the actual rendering in order to avoid double rounding.
    // note that we have to enlarge the buffer again when rounding up happens!
    let len = if exp <= limit {
        // oops, we cannot even produce *one* digit.
        // this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
        //
        // in principle we can immediately call `possibly_round` with an empty buffer,
        // but scaling `max_ten_kappa << e` by 10 can result in overflow.
        // thus we are being sloppy here and widen the error range by a factor of 10.
        // this will increase the false negative rate, but only very, *very* slightly;
        // it can only matter noticeably when the mantissa is bigger than 60 bits.
        //
        // SAFETY: `len=0`, so the obligation of having initialized this memory is trivial.
        return unsafe {
            possibly_round(buf, 0, exp, limit, v.f / 10, (max_ten_kappa as u64) << e, err << e)
        };
    } else if ((exp as i32 - limit as i32) as usize) < buf.len() {
        (exp - limit) as usize
    } else {
        buf.len()
    };
    debug_assert!(len > 0);

    // render integral parts.
    // the error is entirely fractional, so we don't need to check it in this part.
    let mut kappa = max_kappa as i16;
    let mut ten_kappa = max_ten_kappa; // 10^kappa
    let mut remainder = vint; // digits yet to be rendered
    loop {
        // we always have at least one digit to render
        // invariants:
        // - `remainder < 10^(kappa+1)`
        // - `vint = d[0..n-1] * 10^(kappa+1) + remainder`
        //   (it follows that `remainder = vint % 10^(kappa+1)`)

        // divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
        let q = remainder / ten_kappa;
        let r = remainder % ten_kappa;
        debug_assert!(q < 10);
        buf[i] = MaybeUninit::new(b'0' + q as u8);
        i += 1;

        // is the buffer full? run the rounding pass with the remainder.
        if i == len {
            let vrem = ((r as u64) << e) + vfrac; // == (v % 10^kappa) * 2^e
            // SAFETY: we have initialized `len` many bytes.
            return unsafe {
                possibly_round(buf, len, exp, limit, vrem, (ten_kappa as u64) << e, err << e)
            };
        }

        // break the loop when we have rendered all integral digits.
        // the exact number of digits is `max_kappa + 1` as `v < 10^(max_kappa+1)`.
        if i > max_kappa as usize {
            debug_assert_eq!(ten_kappa, 1);
            debug_assert_eq!(kappa, 0);
            break;
        }

        // restore invariants
        kappa -= 1;
        ten_kappa /= 10;
        remainder = r;
    }

    // render fractional parts.
    //
    // in principle we can continue to the last available digit and check for the accuracy.
    // unfortunately we are working with the finite-sized integers, so we need some criterion
    // to detect the overflow. V8 uses `remainder > err`, which becomes false when
    // the first `i` significant digits of `v - 1 ulp` and `v` differ. however this rejects
    // too many otherwise valid input.
    //
    // since the later phase has a correct overflow detection, we instead use tighter criterion:
    // we continue til `err` exceeds `10^kappa / 2`, so that the range between `v - 1 ulp` and
    // `v + 1 ulp` definitely contains two or more rounded representations. this is same to
    // the first two comparisons from `possibly_round`, for the reference.
    let mut remainder = vfrac;
    let maxerr = 1 << (e - 1);
    while err < maxerr {
        // invariants, where `m = max_kappa + 1` (# of digits in the integral part):
        // - `remainder < 2^e`
        // - `vfrac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
        // - `err = 10^(n-m)`

        remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
        err *= 10; // won't overflow, `err * 10 < 2^e * 5 < 2^64`

        // divide `remainder` by `10^kappa`.
        // both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
        let q = remainder >> e;
        let r = remainder & ((1 << e) - 1);
        debug_assert!(q < 10);
        buf[i] = MaybeUninit::new(b'0' + q as u8);
        i += 1;

        // is the buffer full? run the rounding pass with the remainder.
        if i == len {
            // SAFETY: we have initialized `len` many bytes.
            return unsafe { possibly_round(buf, len, exp, limit, r, 1 << e, err) };
        }

        // restore invariants
        remainder = r;
    }

    // further calculation is useless (`possibly_round` definitely fails), so we give up.
    return None;

    // we've generated all requested digits of `v`, which should be also same to corresponding
    // digits of `v - 1 ulp`. now we check if there is a unique representation shared by
    // both `v - 1 ulp` and `v + 1 ulp`; this can be either same to generated digits, or
    // to the rounded-up version of those digits. if the range contains multiple representations
    // of the same length, we cannot be sure and should return `None` instead.
    //
    // all arguments here are scaled by the common (but implicit) value `k`, so that:
    // - `remainder = (v % 10^kappa) * k`
    // - `ten_kappa = 10^kappa * k`
    // - `ulp = 2^-e * k`
    //
    // SAFETY: the first `len` bytes of `buf` must be initialized.
    unsafe fn possibly_round(
        buf: &mut [MaybeUninit<u8>],
        mut len: usize,
        mut exp: i16,
        limit: i16,
        remainder: u64,
        ten_kappa: u64,
        ulp: u64,
    ) -> Option<(&[u8], i16)> {
        debug_assert!(remainder < ten_kappa);

        //           10^kappa
        //    :   :  :<->:   :
        //    :   :  :   :   :
        //    :|1 ulp|1 ulp| :
        //    :|<--->|<--->| :
        //  ---|-----|-----|---
        //     |     v     |
        // v - 1 ulp   v + 1 ulp
        //
        // (for the reference, the dotted line indicates the exact value for
        // possible representations in given number of digits.)
        //
        // error is too large that there are at least three possible representations
        // between `v - 1 ulp` and `v + 1 ulp`. we cannot determine which one is correct.
        if ulp >= ten_kappa {
            return None;
        }

        //    10^kappa
        //   :<------->:
        //   :         :
        //   : |1 ulp|1 ulp|
        //   : |<--->|<--->|
        //  ---|-----|-----|---
        //     |     v     |
        // v - 1 ulp   v + 1 ulp
        //
        // in fact, 1/2 ulp is enough to introduce two possible representations.
        // (remember that we need a unique representation for both `v - 1 ulp` and `v + 1 ulp`.)
        // this won't overflow, as `ulp < ten_kappa` from the first check.
        if ten_kappa - ulp <= ulp {
            return None;
        }

        //     remainder
        //   :<->|       :
        //   :   |       :
        //   :<--------- 10^kappa ---------->:
        //   |   :       |                   :
        //   |1 ulp|1 ulp|                   :
        //   |<--->|<--->|                   :
        //  -|-----|-----|--------------------
        //   |     v     |
        // v - 1 ulp   v + 1 ulp
        //
        // if `v + 1 ulp` is closer to the rounded-down representation (which is already in `buf`),
        // then we can safely return. note that `v - 1 ulp` *can* be less than the current
        // representation, but as `1 ulp < 10^kappa / 2`, this condition is enough:
        // the distance between `v - 1 ulp` and the current representation
        // cannot exceed `10^kappa / 2`.
        //
        // the condition equals to `remainder + ulp < 10^kappa / 2`.
        // since this can easily overflow, first check if `remainder < 10^kappa / 2`.
        // we've already verified that `ulp < 10^kappa / 2`, so as long as
        // `10^kappa` did not overflow after all, the second check is fine.
        if ten_kappa - remainder > remainder && ten_kappa - 2 * remainder >= 2 * ulp {
            // SAFETY: our caller initialized that memory.
            return Some((unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, exp));
        }

        //   :<------- remainder ------>|    :
        //   :                          |    :
        //   :<--------- 10^kappa --------->:
        //   :                    |     |   :
        //   :              |1 ulp|1 ulp|
        //   :              |<--->|<--->|
        //  --------------------|-----|-----|-
        //                      |     v     |
        //                  v - 1 ulp   v + 1 ulp
        //
        // on the other hands, if `v - 1 ulp` is closer to the rounded-up representation,
        // we should round up and return. for the same reason we don't need to check `v + 1 ulp`.
        //
        // the condition equals to `remainder - ulp >= 10^kappa / 2`.
        // again we first check if `remainder > ulp` (note that this is not `remainder >= ulp`,
        // as `10^kappa` is never zero). also note that `remainder - ulp <= 10^kappa`,
        // so the second check does not overflow.
        if remainder > ulp && ten_kappa - (remainder - ulp) <= remainder - ulp {
            if let Some(c) =
                // SAFETY: our caller must have initialized that memory.
                round_up(unsafe { MaybeUninit::slice_assume_init_mut(&mut buf[..len]) })
            {
                // only add an additional digit when we've been requested the fixed precision.
                // we also need to check that, if the original buffer was empty,
                // the additional digit can only be added when `exp == limit` (edge case).
                exp += 1;
                if exp > limit && len < buf.len() {
                    buf[len] = MaybeUninit::new(c);
                    len += 1;
                }
            }
            // SAFETY: we and our caller initialized that memory.
            return Some((unsafe { MaybeUninit::slice_assume_init_ref(&buf[..len]) }, exp));
        }

        // otherwise we are doomed (i.e., some values between `v - 1 ulp` and `v + 1 ulp` are
        // rounding down and others are rounding up) and give up.
        None
    }
}
+
+/// The exact and fixed mode implementation for Grisu with Dragon fallback.
+///
+/// This should be used for most cases.
+pub fn format_exact<'a>(
+ d: &Decoded,
+ buf: &'a mut [MaybeUninit<u8>],
+ limit: i16,
+) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
+ use crate::num::flt2dec::strategy::dragon::format_exact as fallback;
+ // SAFETY: The borrow checker is not smart enough to let us use `buf`
+ // in the second branch, so we launder the lifetime here. But we only re-use
+ // `buf` if `format_exact_opt` returned `None` so this is okay.
+ match format_exact_opt(d, unsafe { &mut *(buf as *mut _) }, limit) {
+ Some(ret) => ret,
+ None => fallback(d, buf, limit),
+ }
+}
--- /dev/null
+macro_rules! int_impl {
+ ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $Min:expr, $Max:expr, $Feature:expr,
+ $EndFeature:expr, $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr,
+ $reversed:expr, $le_bytes:expr, $be_bytes:expr,
+ $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr) => {
+ doc_comment! {
+ concat!("The smallest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MIN, ", stringify!($Min), ");",
+$EndFeature, "
+```"),
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: Self = !0 ^ ((!0 as $UnsignedT) >> 1) as Self;
+ }
+
+ doc_comment! {
+ concat!("The largest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($Max), ");",
+$EndFeature, "
+```"),
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: Self = !Self::MIN;
+ }
+
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
+ doc_comment! {
+ concat!("Converts a string slice in a given base to an integer.
+
+The string is expected to be an optional `+` or `-` sign followed by digits.
+Leading and trailing whitespace represent an error. Digits are a subset of these characters,
+depending on `radix`:
+
+ * `0-9`
+ * `a-z`
+ * `A-Z`
+
+# Panics
+
+This function panics if `radix` is not in the range from 2 to 36.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0b100_0000", stringify!($SelfT), ";
+
+assert_eq!(n.count_ones(), 1);",
+$EndFeature, "
+```
+"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MAX.count_zeros(), 1);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of leading zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = -1", stringify!($SelfT), ";
+
+assert_eq!(n.leading_zeros(), 0);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn leading_zeros(self) -> u32 {
+ (self as $UnsignedT).leading_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of trailing zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = -4", stringify!($SelfT), ";
+
+assert_eq!(n.trailing_zeros(), 2);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn trailing_zeros(self) -> u32 {
+ (self as $UnsignedT).trailing_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of leading ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = -1", stringify!($SelfT), ";
+
+assert_eq!(n.leading_ones(), ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[inline]
+ pub const fn leading_ones(self) -> u32 {
+ (self as $UnsignedT).leading_ones()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of trailing ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 3", stringify!($SelfT), ";
+
+assert_eq!(n.trailing_ones(), 2);",
+$EndFeature, "
+```"),
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[inline]
+ pub const fn trailing_ones(self) -> u32 {
+ (self as $UnsignedT).trailing_ones()
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts the bits to the left by a specified amount, `n`,
+wrapping the truncated bits to the end of the resulting integer.
+
+Please note this isn't the same operation as the `<<` shifting operator!
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $rot_op, stringify!($SelfT), ";
+let m = ", $rot_result, ";
+
+assert_eq!(n.rotate_left(", $rot, "), m);
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_left(n) as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts the bits to the right by a specified amount, `n`,
+wrapping the truncated bits to the beginning of the resulting
+integer.
+
+Please note this isn't the same operation as the `>>` shifting operator!
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $rot_result, stringify!($SelfT), ";
+let m = ", $rot_op, ";
+
+assert_eq!(n.rotate_right(", $rot, "), m);
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ (self as $UnsignedT).rotate_right(n) as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Reverses the byte order of the integer.
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $swap_op, stringify!($SelfT), ";
+
+let m = n.swap_bytes();
+
+assert_eq!(m, ", $swapped, ");
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn swap_bytes(self) -> Self {
+ (self as $UnsignedT).swap_bytes() as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Reverses the order of bits in the integer. The least significant bit becomes the most significant bit,
+ second least-significant bit becomes second most-significant bit, etc.
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $swap_op, stringify!($SelfT), ";
+let m = n.reverse_bits();
+
+assert_eq!(m, ", $reversed, ");
+assert_eq!(0, 0", stringify!($SelfT), ".reverse_bits());
+```"),
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ #[must_use]
+ pub const fn reverse_bits(self) -> Self {
+ (self as $UnsignedT).reverse_bits() as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from big endian to the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(", stringify!($SelfT), "::from_be(n), n)
+} else {
+ assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes())
+}",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[inline]
+ pub const fn from_be(x: Self) -> Self {
+ #[cfg(target_endian = "big")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ x.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from little endian to the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(", stringify!($SelfT), "::from_le(n), n)
+} else {
+ assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes())
+}",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[inline]
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ x.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to big endian from the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(n.to_be(), n)
+} else {
+ assert_eq!(n.to_be(), n.swap_bytes())
+}",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[inline]
+ pub const fn to_be(self) -> Self { // or not to be?
+ #[cfg(target_endian = "big")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ self.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to little endian from the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(n.to_le(), n)
+} else {
+ assert_eq!(n.to_le(), n.swap_bytes())
+}",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_conversions", since = "1.32.0")]
+ #[inline]
+ pub const fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ self.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer addition. Computes `self + rhs`, returning `None`
+if overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((", stringify!($SelfT),
+"::MAX - 2).checked_add(1), Some(", stringify!($SelfT), "::MAX - 1));
+assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
+"::MAX` or `self + rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_add`.
+ unsafe { intrinsics::unchecked_add(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if
+overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((", stringify!($SelfT),
+"::MIN + 2).checked_sub(1), Some(", stringify!($SelfT), "::MIN + 1));
+assert_eq!((", stringify!($SelfT), "::MIN + 2).checked_sub(3), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
+"::MAX` or `self - rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_sub`.
+ unsafe { intrinsics::unchecked_sub(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if
+overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT),
+"::MAX.checked_mul(1), Some(", stringify!($SelfT), "::MAX));
+assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
+"::MAX` or `self * rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_mul`.
+ unsafe { intrinsics::unchecked_mul(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`
+or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((", stringify!($SelfT),
+"::MIN + 1).checked_div(-1), Some(", stringify!($Max), "));
+assert_eq!(", stringify!($SelfT), "::MIN.checked_div(-1), None);
+assert_eq!((1", stringify!($SelfT), ").checked_div(0), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_checked_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
+ None
+ } else {
+ // SAFETY: div by zero and by INT_MIN have been checked above
+ Some(unsafe { intrinsics::unchecked_div(self, rhs) })
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked Euclidean division. Computes `self.div_euclid(rhs)`,
+returning `None` if `rhs == 0` or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!((", stringify!($SelfT),
+"::MIN + 1).checked_div_euclid(-1), Some(", stringify!($Max), "));
+assert_eq!(", stringify!($SelfT), "::MIN.checked_div_euclid(-1), None);
+assert_eq!((1", stringify!($SelfT), ").checked_div_euclid(0), None);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
+ None
+ } else {
+ Some(self.div_euclid(rhs))
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer remainder. Computes `self % rhs`, returning `None` if
+`rhs == 0` or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));
+assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);
+assert_eq!(", stringify!($SelfT), "::MIN.checked_rem(-1), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_checked_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
+ None
+ } else {
+ // SAFETY: div by zero and by INT_MIN have been checked above
+ Some(unsafe { intrinsics::unchecked_rem(self, rhs) })
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked Euclidean remainder. Computes `self.rem_euclid(rhs)`, returning `None`
+if `rhs == 0` or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1));
+assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None);
+assert_eq!(", stringify!($SelfT), "::MIN.checked_rem_euclid(-1), None);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
+ None
+ } else {
+ Some(self.rem_euclid(rhs))
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked negation. Computes `-self`, returning `None` if `self == MIN`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".checked_neg(), Some(-5));
+assert_eq!(", stringify!($SelfT), "::MIN.checked_neg(), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn checked_neg(self) -> Option<Self> {
+ let (a, b) = self.overflowing_neg();
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked shift left. Computes `self << rhs`, returning `None` if `rhs` is larger
+than or equal to the number of bits in `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10));
+assert_eq!(0x1", stringify!($SelfT), ".checked_shl(129), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shl(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is
+larger than or equal to the number of bits in `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1));
+assert_eq!(0x10", stringify!($SelfT), ".checked_shr(128), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shr(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked absolute value. Computes `self.abs()`, returning `None` if
+`self == MIN`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!((-5", stringify!($SelfT), ").checked_abs(), Some(5));
+assert_eq!(", stringify!($SelfT), "::MIN.checked_abs(), None);",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn checked_abs(self) -> Option<Self> {
+ if self.is_negative() {
+ self.checked_neg()
+ } else {
+ Some(self)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
+overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(8", stringify!($SelfT), ".checked_pow(2), Some(64));
+assert_eq!(", stringify!($SelfT), "::MAX.checked_pow(2), None);",
+$EndFeature, "
+```"),
+
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
+ if exp == 0 {
+ return Some(1);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ // Exponentiation by squaring: each iteration halves `exp`, squaring
+ // `base` and folding it into `acc` whenever the low bit of `exp` is set.
+ // Any intermediate overflow propagates `None` via `try_opt!`.
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = try_opt!(acc.checked_mul(base));
+ }
+ exp /= 2;
+ base = try_opt!(base.checked_mul(base));
+ }
+ // At this point `exp` is exactly 1: the loop only runs while `exp > 1`
+ // and the `exp == 0` case returned early. Handle this last bit outside
+ // the loop so `base` is not squared one extra time, which could
+ // overflow needlessly.
+ Some(try_opt!(acc.checked_mul(base)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer addition. Computes `self + rhs`, saturating at the numeric
+bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_add(100), ", stringify!($SelfT),
+"::MAX);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_add(-1), ", stringify!($SelfT),
+"::MIN);",
+$EndFeature, "
+```"),
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_add(self, rhs: Self) -> Self {
+ intrinsics::saturating_add(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer subtraction. Computes `self - rhs`, saturating at the
+numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_sub(127), -27);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_sub(100), ", stringify!($SelfT),
+"::MIN);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_sub(-1), ", stringify!($SelfT),
+"::MAX);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_sub(self, rhs: Self) -> Self {
+ intrinsics::saturating_sub(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer negation. Computes `-self`, returning `MAX` if `self == MIN`
+instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_neg(), -100);
+assert_eq!((-100", stringify!($SelfT), ").saturating_neg(), 100);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_neg(), ", stringify!($SelfT),
+"::MAX);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_neg(), ", stringify!($SelfT),
+"::MIN + 1);",
+$EndFeature, "
+```"),
+
+ #[stable(feature = "saturating_neg", since = "1.45.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn saturating_neg(self) -> Self {
+ // Negation expressed as a saturating `0 - self`: for `MIN`, whose true
+ // negation is unrepresentable, the subtraction saturates to `MAX`
+ // instead of overflowing (matching the doc contract above).
+ intrinsics::saturating_sub(0, self)
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating absolute value. Computes `self.abs()`, returning `MAX` if `self ==
+MIN` instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_abs(), 100);
+assert_eq!((-100", stringify!($SelfT), ").saturating_abs(), 100);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_abs(), ", stringify!($SelfT),
+"::MAX);
+assert_eq!((", stringify!($SelfT), "::MIN + 1).saturating_abs(), ", stringify!($SelfT),
+"::MAX);",
+$EndFeature, "
+```"),
+
+ #[stable(feature = "saturating_neg", since = "1.45.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn saturating_abs(self) -> Self {
+ if self.is_negative() {
+ self.saturating_neg()
+ } else {
+ self
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer multiplication. Computes `self * rhs`, saturating at the
+numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(10", stringify!($SelfT), ".saturating_mul(12), 120);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_mul(10), ", stringify!($SelfT), "::MAX);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_mul(10), ", stringify!($SelfT), "::MIN);",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_mul(self, rhs: Self) -> Self {
+ match self.checked_mul(rhs) {
+ Some(x) => x,
+ // On overflow, saturate toward the sign of the true product:
+ // equal operand signs mean a positive product (clamp to MAX),
+ // differing signs mean a negative product (clamp to MIN).
+ None => if (self < 0) == (rhs < 0) {
+ Self::MAX
+ } else {
+ Self::MIN
+ }
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer exponentiation. Computes `self.pow(exp)`,
+saturating at the numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT), "::MAX);
+assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_pow(self, exp: u32) -> Self {
+ match self.checked_pow(exp) {
+ Some(x) => x,
+ None if self < 0 && exp % 2 == 1 => Self::MIN,
+ None => Self::MAX,
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) addition. Computes `self + rhs`, wrapping around at the
+boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_add(27), 127);
+assert_eq!(", stringify!($SelfT), "::MAX.wrapping_add(2), ", stringify!($SelfT),
+"::MIN + 1);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_add(self, rhs: Self) -> Self {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) subtraction. Computes `self - rhs`, wrapping around at the
+boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0", stringify!($SelfT), ".wrapping_sub(127), -127);
+assert_eq!((-2", stringify!($SelfT), ").wrapping_sub(", stringify!($SelfT), "::MAX), ",
+stringify!($SelfT), "::MAX);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_sub(self, rhs: Self) -> Self {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) multiplication. Computes `self * rhs`, wrapping around at
+the boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(10", stringify!($SelfT), ".wrapping_mul(12), 120);
+assert_eq!(11i8.wrapping_mul(12), -124);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_mul(self, rhs: Self) -> Self {
+ intrinsics::wrapping_mul(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) division. Computes `self / rhs`, wrapping around at the
+boundary of the type.
+
+The only case where such wrapping can occur is when one divides `MIN / -1` on a signed type (where
+`MIN` is the negative minimal value for the type); this is equivalent to `-MIN`, a positive value
+that is too large to represent in the type. In such a case, this function returns `MIN` itself.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10);
+assert_eq!((-128i8).wrapping_div(-1), -128);",
+$EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_unstable(feature = "const_wrapping_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div(self, rhs: Self) -> Self {
+ self.overflowing_div(rhs).0
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping Euclidean division. Computes `self.div_euclid(rhs)`,
+wrapping around at the boundary of the type.
+
+Wrapping will only occur in `MIN / -1` on a signed type (where `MIN` is the negative minimal value
+for the type). This is equivalent to `-MIN`, a positive value that is too large to represent in the
+type. In this case, this method returns `MIN` itself.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10);
+assert_eq!((-128i8).wrapping_div_euclid(-1), -128);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div_euclid(self, rhs: Self) -> Self {
+ self.overflowing_div_euclid(rhs).0
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) remainder. Computes `self % rhs`, wrapping around at the
+boundary of the type.
+
+Such wrap-around never actually occurs mathematically; implementation artifacts make `x % y`
+invalid for `MIN / -1` on a signed type (where `MIN` is the negative minimal value). In such a case,
+this function returns `0`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0);
+assert_eq!((-128i8).wrapping_rem(-1), 0);",
+$EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_unstable(feature = "const_wrapping_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem(self, rhs: Self) -> Self {
+ self.overflowing_rem(rhs).0
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping Euclidean remainder. Computes `self.rem_euclid(rhs)`, wrapping around
+at the boundary of the type.
+
+Wrapping will only occur in `MIN % -1` on a signed type (where `MIN` is the negative minimal value
+for the type). In this case, this method returns 0.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0);
+assert_eq!((-128i8).wrapping_rem_euclid(-1), 0);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+ self.overflowing_rem_euclid(rhs).0
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary
+of the type.
+
+The only case where such wrapping can occur is when one negates `MIN` on a signed type (where `MIN`
+is the negative minimal value for the type); this is a positive value that is too large to represent
+in the type. In such a case, this function returns `MIN` itself.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_neg(), -100);
+assert_eq!(", stringify!($SelfT), "::MIN.wrapping_neg(), ", stringify!($SelfT),
+"::MIN);",
+$EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn wrapping_neg(self) -> Self {
+ // Delegate to the overflowing variant and discard the overflow flag;
+ // `overflowing_neg` returns `MIN` for `MIN`, which is the wrapped result.
+ self.overflowing_neg().0
+ }
+ }
+
+ doc_comment! {
+ concat!("Panic-free bitwise shift-left; yields `self << mask(rhs)`, where `mask` removes
+any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
+
+Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to
+the range of the type, rather than the bits shifted out of the LHS being returned to the other end.
+The primitive integer types all implement a [`rotate_left`](#method.rotate_left) function,
+which may be what you want instead.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(7), -128);
+assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(128), -1);",
+$EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_shl(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Panic-free bitwise shift-right; yields `self >> mask(rhs)`, where `mask`
+removes any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type.
+
+Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted
+to the range of the type, rather than the bits shifted out of the LHS being returned to the other
+end. The primitive integer types all implement a [`rotate_right`](#method.rotate_right) function,
+which may be what you want instead.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((-128", stringify!($SelfT), ").wrapping_shr(7), -1);
+assert_eq!((-128i16).wrapping_shr(64), -128);",
+$EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_shr(self, rhs: u32) -> Self {
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ // `$BITS` is a power of two, so `rhs & ($BITS - 1)` reduces the shift amount
+ // modulo the bit width.
+ unsafe {
+ intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) absolute value. Computes `self.abs()`, wrapping around at
+the boundary of the type.
+
+The only case where such wrapping can occur is when one takes the absolute value of the negative
+minimal value for the type; this is a positive value that is too large to represent in the type. In
+such a case, this function returns `MIN` itself.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_abs(), 100);
+assert_eq!((-100", stringify!($SelfT), ").wrapping_abs(), 100);
+assert_eq!(", stringify!($SelfT), "::MIN.wrapping_abs(), ", stringify!($SelfT),
+"::MIN);
+assert_eq!((-128i8).wrapping_abs() as u8, 128);",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[allow(unused_attributes)]
+ #[inline]
+ pub const fn wrapping_abs(self) -> Self {
+ if self.is_negative() {
+ // `wrapping_neg` maps `MIN` to `MIN`, giving the documented wrapped result.
+ self.wrapping_neg()
+ } else {
+ self
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Computes the absolute value of `self` without any wrapping
+or panicking.
+
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "#![feature(unsigned_abs)]
+assert_eq!(100", stringify!($SelfT), ".unsigned_abs(), 100", stringify!($UnsignedT), ");
+assert_eq!((-100", stringify!($SelfT), ").unsigned_abs(), 100", stringify!($UnsignedT), ");
+assert_eq!((-128i8).unsigned_abs(), 128u8);",
+$EndFeature, "
+```"),
+ #[unstable(feature = "unsigned_abs", issue = "74913")]
+ #[inline]
+ pub const fn unsigned_abs(self) -> $UnsignedT {
+ // The unsigned cast reinterprets the wrapped `MIN` as its exact magnitude
+ // (e.g. `(-128i8).wrapping_abs() as u8 == 128`), so no value is lost.
+ self.wrapping_abs() as $UnsignedT
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) exponentiation. Computes `self.pow(exp)`,
+wrapping around at the boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(4), 81);
+assert_eq!(3i8.wrapping_pow(5), -13);
+assert_eq!(3i8.wrapping_pow(6), -39);",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ // Exponentiation by squaring: O(log exp) wrapping multiplications.
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc.wrapping_mul(base);
+ }
+ exp /= 2;
+ base = base.wrapping_mul(base);
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc.wrapping_mul(base)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates `self` + `rhs`
+
+Returns a tuple of the addition along with a boolean indicating whether an arithmetic overflow would
+occur. If an overflow would have occurred then the wrapped value is returned.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));
+assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($SelfT),
+"::MIN, true));", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ // The intrinsic returns the wrapped sum plus an overflow flag.
+ // NOTE(review): the casts via `$ActualT` appear to normalize to a concrete
+ // fixed-width primitive (identical to `$SelfT` in the visible invocations) --
+ // confirm the intent in int_macros for pointer-sized types.
+ let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates `self` - `rhs`
+
+Returns a tuple of the subtraction along with a boolean indicating whether an arithmetic overflow
+would occur. If an overflow would have occurred then the wrapped value is returned.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($SelfT),
+"::MAX, true));", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ // Wrapped difference plus overflow flag, mirroring `overflowing_add`.
+ let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the multiplication of `self` and `rhs`.
+
+Returns a tuple of the multiplication along with a boolean indicating whether an arithmetic overflow
+would occur. If an overflow would have occurred then the wrapped value is returned.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_mul(2), (10, false));
+assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ // Wrapped product plus overflow flag, mirroring `overflowing_add`.
+ let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the quotient when `self` is divided by `rhs`.
+
+Returns a tuple of the quotient along with a boolean indicating whether an arithmetic overflow would
+occur. If an overflow would occur then self is returned.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div(-1), (", stringify!($SelfT),
+"::MIN, true));",
+$EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_overflowing_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ // `MIN / -1` is the only overflowing case: the true quotient (`-MIN`) is not
+ // representable, so return `self` with the overflow flag set.
+ if unlikely!(self == Self::MIN && rhs == -1) {
+ (self, true)
+ } else {
+ (self / rhs, false)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the quotient of Euclidean division `self.div_euclid(rhs)`.
+
+Returns a tuple of the quotient along with a boolean indicating whether an arithmetic overflow would
+occur. If an overflow would occur then `self` is returned.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euclid(-1), (", stringify!($SelfT),
+"::MIN, true));
+```"),
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+ // As with `overflowing_div`, `MIN / -1` is the single overflowing case.
+ if unlikely!(self == Self::MIN && rhs == -1) {
+ (self, true)
+ } else {
+ (self.div_euclid(rhs), false)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the remainder when `self` is divided by `rhs`.
+
+Returns a tuple of the remainder after dividing along with a boolean indicating whether an
+arithmetic overflow would occur. If an overflow would occur then 0 is returned.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem(-1), (0, true));",
+$EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_overflowing_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ // `MIN % -1` would overflow because the intermediate `MIN / -1` does;
+ // its mathematical remainder is 0, so return that with the flag set.
+ if unlikely!(self == Self::MIN && rhs == -1) {
+ (0, true)
+ } else {
+ (self % rhs, false)
+ }
+ }
+ }
+
+
+ doc_comment! {
+ concat!("Overflowing Euclidean remainder. Calculates `self.rem_euclid(rhs)`.
+
+Returns a tuple of the remainder after dividing along with a boolean indicating whether an
+arithmetic overflow would occur. If an overflow would occur then 0 is returned.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem_euclid(-1), (0, true));
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+ // Same single overflowing case as `overflowing_rem`: `MIN % -1` -> 0.
+ if unlikely!(self == Self::MIN && rhs == -1) {
+ (0, true)
+ } else {
+ (self.rem_euclid(rhs), false)
+ }
+ }
+ }
+
+
+ doc_comment! {
+ concat!("Negates self, overflowing if this is equal to the minimum value.
+
+Returns a tuple of the negated version of self along with a boolean indicating whether an overflow
+happened. If `self` is the minimum value (e.g., `i32::MIN` for values of type `i32`), then the
+minimum value will be returned again and `true` will be returned for an overflow happening.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2, false));
+assert_eq!(", stringify!($SelfT), "::MIN.overflowing_neg(), (", stringify!($SelfT),
+"::MIN, true));", $EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[allow(unused_attributes)]
+ pub const fn overflowing_neg(self) -> (Self, bool) {
+ // `-MIN` is not representable in two's complement; report `MIN` + overflow.
+ if unlikely!(self == Self::MIN) {
+ (Self::MIN, true)
+ } else {
+ (-self, false)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts self left by `rhs` bits.
+
+Returns a tuple of the shifted version of self along with a boolean indicating whether the shift
+value was larger than or equal to the number of bits. If the shift value is too large, then value is
+masked (N-1) where N is the number of bits, and this value is then used to perform the shift.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x1", stringify!($SelfT),".overflowing_shl(4), (0x10, false));
+assert_eq!(0x1i32.overflowing_shl(36), (0x10, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ // Shift with the masked amount; flag overflow when `rhs` >= bit width.
+ (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts self right by `rhs` bits.
+
+Returns a tuple of the shifted version of self along with a boolean indicating whether the shift
+value was larger than or equal to the number of bits. If the shift value is too large, then value is
+masked (N-1) where N is the number of bits, and this value is then used to perform the shift.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false));
+assert_eq!(0x10i32.overflowing_shr(36), (0x1, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ // Mirror of `overflowing_shl` for the right shift.
+ (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Computes the absolute value of `self`.
+
+Returns a tuple of the absolute version of self along with a boolean indicating whether an overflow
+happened. If self is the minimum value (e.g., ", stringify!($SelfT), "::MIN for values of type
+ ", stringify!($SelfT), "), then the minimum value will be returned again and true will be returned
+for an overflow happening.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(10", stringify!($SelfT), ".overflowing_abs(), (10, false));
+assert_eq!((-10", stringify!($SelfT), ").overflowing_abs(), (10, false));
+assert_eq!((", stringify!($SelfT), "::MIN).overflowing_abs(), (", stringify!($SelfT),
+"::MIN, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn overflowing_abs(self) -> (Self, bool) {
+ // `wrapping_abs` already yields `MIN` for `MIN`; that is the only
+ // overflowing input, so the flag is simply `self == MIN`.
+ (self.wrapping_abs(), self == Self::MIN)
+ }
+ }
+
+ doc_comment! {
+ concat!("Raises self to the power of `exp`, using exponentiation by squaring.
+
+Returns a tuple of the exponentiation along with a bool indicating
+whether an overflow happened.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(4), (81, false));
+assert_eq!(3i8.overflowing_pow(5), (-13, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+ if exp == 0 {
+ return (1, false);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+ // Sticky flag: set as soon as any intermediate multiplication overflows.
+ let mut overflown = false;
+ // Scratch space for storing results of overflowing_mul.
+ let mut r;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ r = acc.overflowing_mul(base);
+ acc = r.0;
+ overflown |= r.1;
+ }
+ exp /= 2;
+ r = base.overflowing_mul(base);
+ base = r.0;
+ overflown |= r.1;
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ r = acc.overflowing_mul(base);
+ r.1 |= overflown;
+ r
+ }
+ }
+
+ doc_comment! {
+ concat!("Raises self to the power of `exp`, using exponentiation by squaring.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let x: ", stringify!($SelfT), " = 2; // or any other integer type
+
+assert_eq!(x.pow(5), 32);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn pow(self, mut exp: u32) -> Self {
+ // Exponentiation by squaring, same shape as `wrapping_pow` but using plain
+ // `*`. NOTE(review): `rustc_inherit_overflow_checks` presumably makes these
+ // multiplications follow the overflow-check setting of the crate this is
+ // inlined into -- confirm against the attribute's documentation.
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc * base;
+ }
+ exp /= 2;
+ base = base * base;
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc * base
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the quotient of Euclidean division of `self` by `rhs`.
+
+This computes the integer `n` such that `self = n * rhs + self.rem_euclid(rhs)`,
+with `0 <= self.rem_euclid(rhs) < rhs`.
+
+In other words, the result is `self / rhs` rounded to the integer `n`
+such that `self >= n * rhs`.
+If `self > 0`, this is equal to round towards zero (the default in Rust);
+if `self < 0`, this is equal to round towards +/- infinity.
+
+# Panics
+
+This function will panic if `rhs` is 0 or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+let a: ", stringify!($SelfT), " = 7; // or any other integer type
+let b = 4;
+
+assert_eq!(a.div_euclid(b), 1); // 7 >= 4 * 1
+assert_eq!(a.div_euclid(-b), -1); // 7 >= -4 * -1
+assert_eq!((-a).div_euclid(b), -2); // -7 >= 4 * -2
+assert_eq!((-a).div_euclid(-b), 2); // -7 >= -4 * 2
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_euclid(self, rhs: Self) -> Self {
+ // `/` truncates toward zero; when the truncated remainder is negative the
+ // truncated quotient is one step too high/low, so nudge it so that the
+ // corresponding Euclidean remainder becomes nonnegative.
+ let q = self / rhs;
+ if self % rhs < 0 {
+ return if rhs > 0 { q - 1 } else { q + 1 }
+ }
+ q
+ }
+ }
+
+
+ doc_comment! {
+ concat!("Calculates the least nonnegative remainder of `self (mod rhs)`.
+
+This is done as if by the Euclidean division algorithm -- given
+`r = self.rem_euclid(rhs)`, `self = rhs * self.div_euclid(rhs) + r`, and
+`0 <= r < abs(rhs)`.
+
+# Panics
+
+This function will panic if `rhs` is 0 or the division results in overflow.
+
+# Examples
+
+Basic usage:
+
+```
+let a: ", stringify!($SelfT), " = 7; // or any other integer type
+let b = 4;
+
+assert_eq!(a.rem_euclid(b), 3);
+assert_eq!((-a).rem_euclid(b), 1);
+assert_eq!(a.rem_euclid(-b), 3);
+assert_eq!((-a).rem_euclid(-b), 1);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn rem_euclid(self, rhs: Self) -> Self {
+ // `%` keeps the sign of `self`; shift a negative remainder into
+ // `0..abs(rhs)` by adding |rhs| with the appropriate sign.
+ let r = self % rhs;
+ if r < 0 {
+ if rhs < 0 {
+ r - rhs
+ } else {
+ r + rhs
+ }
+ } else {
+ r
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Computes the absolute value of `self`.
+
+# Overflow behavior
+
+The absolute value of `", stringify!($SelfT), "::MIN` cannot be represented as an
+`", stringify!($SelfT), "`, and attempting to calculate it will cause an overflow. This means that
+code in debug mode will trigger a panic on this case and optimized code will return `",
+stringify!($SelfT), "::MIN` without a panic.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(10", stringify!($SelfT), ".abs(), 10);
+assert_eq!((-10", stringify!($SelfT), ").abs(), 10);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[allow(unused_attributes)]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn abs(self) -> Self {
+ // Note that the #[inline] above means that the overflow
+ // semantics of the subtraction depend on the crate we're being
+ // inlined into.
+ if self.is_negative() {
+ -self
+ } else {
+ self
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns a number representing sign of `self`.
+
+ - `0` if the number is zero
+ - `1` if the number is positive
+ - `-1` if the number is negative
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(10", stringify!($SelfT), ".signum(), 1);
+assert_eq!(0", stringify!($SelfT), ".signum(), 0);
+assert_eq!((-10", stringify!($SelfT), ").signum(), -1);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_sign", since = "1.47.0")]
+ #[inline]
+ pub const fn signum(self) -> Self {
+ // Three-way comparison against zero; arms are exhaustive for all inputs.
+ match self {
+ n if n > 0 => 1,
+ 0 => 0,
+ _ => -1,
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if `self` is positive and `false` if the number is zero or
+negative.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert!(10", stringify!($SelfT), ".is_positive());
+assert!(!(-10", stringify!($SelfT), ").is_positive());",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ // Strict comparison: zero is neither positive nor negative.
+ pub const fn is_positive(self) -> bool { self > 0 }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if `self` is negative and `false` if the number is zero or
+positive.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert!((-10", stringify!($SelfT), ").is_negative());
+assert!(!10", stringify!($SelfT), ".is_negative());",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[inline]
+ // Strict comparison: zero is neither positive nor negative.
+ pub const fn is_negative(self) -> bool { self < 0 }
+ }
+
+ doc_comment! {
+ concat!("Return the memory representation of this integer as a byte array in
+big-endian (network) byte order.
+",
+$to_xe_bytes_doc,
+"
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes();
+assert_eq!(bytes, ", $be_bytes, ");
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // Reorder to big-endian first, then emit native-order bytes.
+ self.to_be().to_ne_bytes()
+ }
+ }
+
+doc_comment! {
+ concat!("Return the memory representation of this integer as a byte array in
+little-endian byte order.
+",
+$to_xe_bytes_doc,
+"
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes();
+assert_eq!(bytes, ", $le_bytes, ");
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // Reorder to little-endian first, then emit native-order bytes.
+ self.to_le().to_ne_bytes()
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Return the memory representation of this integer as a byte array in
+native byte order.
+
+As the target platform's native endianness is used, portable code
+should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate,
+instead.
+",
+$to_xe_bytes_doc,
+"
+[`to_be_bytes`]: #method.to_be_bytes
+[`to_le_bytes`]: #method.to_le_bytes
+
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes();
+assert_eq!(
+ bytes,
+ if cfg!(target_endian = \"big\") {
+ ", $be_bytes, "
+ } else {
+ ", $le_bytes, "
+ }
+);
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute them to arrays of bytes
+ // The two cfg_attr spellings are the bootstrap/non-bootstrap forms of the same
+ // const-transmute allowance.
+ #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
+ #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ unsafe { mem::transmute(self) }
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Return the memory representation of this integer as a byte array in
+native byte order.
+
+[`to_ne_bytes`] should be preferred over this whenever possible.
+
+[`to_ne_bytes`]: #method.to_ne_bytes
+",
+
+"
+# Examples
+
+```
+#![feature(num_as_ne_bytes)]
+let num = ", $swap_op, stringify!($SelfT), ";
+let bytes = num.as_ne_bytes();
+assert_eq!(
+ bytes,
+ if cfg!(target_endian = \"big\") {
+ &", $be_bytes, "
+ } else {
+ &", $le_bytes, "
+ }
+);
+```"),
+ #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
+ #[inline]
+ pub fn as_ne_bytes(&self) -> &[u8; mem::size_of::<Self>()] {
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ // Borrowed (zero-copy) view, unlike `to_ne_bytes` which returns an array by value.
+ unsafe { &*(self as *const Self as *const _) }
+ }
+ }
+
+doc_comment! {
+ concat!("Create an integer value from its representation as a byte array in
+big endian.
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, ");
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // Inverse of `to_be_bytes`: reinterpret native-order bytes, then swap from BE.
+ Self::from_be(Self::from_ne_bytes(bytes))
+ }
+ }
+
+doc_comment! {
+ concat!("
+Create an integer value from its representation as a byte array in
+little endian.
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, ");
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // Inverse of `to_le_bytes`: reinterpret native-order bytes, then swap from LE.
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+ }
+
+ doc_comment! {
+ concat!("Create an integer value from its memory representation as a byte
+array in native endianness.
+
+As the target platform's native endianness is used, portable code
+likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+appropriate instead.
+
+[`from_be_bytes`]: #method.from_be_bytes
+[`from_le_bytes`]: #method.from_le_bytes
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") {
+ ", $be_bytes, "
+} else {
+ ", $le_bytes, "
+});
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute to them
+ #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
+ #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // SAFETY: integers are plain old datatypes so we can always transmute to them
+ // Exact inverse of `to_ne_bytes` (same transmute in the other direction).
+ unsafe { mem::transmute(bytes) }
+ }
+ }
+
+ doc_comment! {
+ concat!("**This method is soft-deprecated.**
+
+Although using it won’t cause a compilation warning,
+new code should use [`", stringify!($SelfT), "::MIN", "`](#associatedconstant.MIN) instead.
+
+Returns the smallest value that can be represented by this integer type."),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline(always)]
+ #[rustc_promotable]
+ #[rustc_const_stable(feature = "const_min_value", since = "1.32.0")]
+ // Kept for backwards compatibility; simply forwards to the associated constant.
+ pub const fn min_value() -> Self {
+ Self::MIN
+ }
+ }
+
+ doc_comment! {
+ concat!("**This method is soft-deprecated.**
+
+Although using it won’t cause a compilation warning,
+new code should use [`", stringify!($SelfT), "::MAX", "`](#associatedconstant.MAX) instead.
+
+Returns the largest value that can be represented by this integer type."),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline(always)]
+ #[rustc_promotable]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ // Kept for backwards compatibility; simply forwards to the associated constant.
+ pub const fn max_value() -> Self {
+ Self::MAX
+ }
+ }
+ }
+}
--- /dev/null
+//! Numeric traits and functions for the built-in numeric types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::intrinsics;
+use crate::mem;
+use crate::str::FromStr;
+
+// Used because the `?` operator is not allowed in a const context.
+// Used because the `?` operator is not allowed in a const context.
+macro_rules! try_opt {
+ ($e:expr) => {
+ match $e {
+ Some(x) => x,
+ None => return None,
+ }
+ };
+}
+
+// Thin wrapper over `intrinsics::unlikely`; the allow_internal_unstable
+// attribute lets it expand inside const fns despite `const_likely` being unstable.
+#[allow_internal_unstable(const_likely)]
+macro_rules! unlikely {
+ ($e: expr) => {
+ intrinsics::unlikely($e)
+ };
+}
+
+// Attaches a computed string (e.g. from `concat!`) as the `#[doc]` attribute of
+// the following item, since doc comments cannot splice macro output directly.
+macro_rules! doc_comment {
+ ($x:expr, $($tt:tt)*) => {
+ #[doc = $x]
+ $($tt)*
+ };
+}
+
+// All these modules are technically private and only exposed for coretests:
+pub mod bignum;
+pub mod dec2flt;
+pub mod diy_float;
+pub mod flt2dec;
+
+#[macro_use]
+mod int_macros; // import int_impl!
+#[macro_use]
+mod uint_macros; // import uint_impl!
+
+mod error;
+mod nonzero;
+mod wrapping;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use wrapping::Wrapping;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use dec2flt::ParseFloatError;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use error::ParseIntError;
+
+#[stable(feature = "nonzero", since = "1.28.0")]
+pub use nonzero::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize};
+
+#[stable(feature = "signed_nonzero", since = "1.34.0")]
+pub use nonzero::{NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize};
+
+#[stable(feature = "try_from", since = "1.34.0")]
+pub use error::TryFromIntError;
+
+#[unstable(
+ feature = "int_error_matching",
+ reason = "it can be useful to match errors when making error messages \
+ for integer parsing",
+ issue = "22639"
+)]
+pub use error::IntErrorKind;
+
+// Extra doc paragraph spliced into the `to_*_bytes` methods of the
+// pointer-sized types, whose returned array length is target-dependent.
+macro_rules! usize_isize_to_xe_bytes_doc {
+    () => {
+        "
+
+**Note**: This function returns an array of length 2, 4 or 8 bytes
+depending on the target pointer size.
+
+"
+    };
+}
+
+// Extra doc paragraph spliced into the `from_*_bytes` methods of the
+// pointer-sized types, whose accepted array length is target-dependent.
+macro_rules! usize_isize_from_xe_bytes_doc {
+    () => {
+        "
+
+**Note**: This function takes an array of length 2, 4 or 8 bytes
+depending on the target pointer size.
+
+"
+    };
+}
+
+// Each `#[lang]` impl below instantiates the shared signed-integer API via
+// `int_impl!`. Judging from the call sites, the leading arguments are: the
+// surface type, its underlying fixed-width type, the unsigned counterpart,
+// the bit width, MIN and MAX; the remaining string arguments appear to be
+// spliced into generated doc examples (rotate/swap/reverse and byte
+// conversions), and the two trailing slots carry the extra pointer-size
+// note used only by `isize` — the macro definition itself is not in view,
+// so confirm against `int_macros.rs` before reordering anything.
+#[lang = "i8"]
+impl i8 {
+    int_impl! { i8, i8, u8, 8, -128, 127, "", "", 2, "-0x7e", "0xa", "0x12", "0x12", "0x48",
+    "[0x12]", "[0x12]", "", "" }
+}
+
+#[lang = "i16"]
+impl i16 {
+    int_impl! { i16, i16, u16, 16, -32768, 32767, "", "", 4, "-0x5ffd", "0x3a", "0x1234", "0x3412",
+    "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]", "", "" }
+}
+
+#[lang = "i32"]
+impl i32 {
+    int_impl! { i32, i32, u32, 32, -2147483648, 2147483647, "", "", 8, "0x10000b3", "0xb301",
+    "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78]", "", "" }
+}
+
+#[lang = "i64"]
+impl i64 {
+    int_impl! { i64, i64, u64, 64, -9223372036854775808, 9223372036854775807, "", "", 12,
+    "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412",
+    "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]", "", "" }
+}
+
+#[lang = "i128"]
+impl i128 {
+    int_impl! { i128, i128, u128, 128, -170141183460469231731687303715884105728,
+    170141183460469231731687303715884105727, "", "", 16,
+    "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012",
+    "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48",
+    "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \
+    0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \
+    0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]", "", "" }
+}
+
+// `isize` is defined once per pointer width, mirroring the matching
+// fixed-width type and appending the target-dependent byte-length notes.
+#[cfg(target_pointer_width = "16")]
+#[lang = "isize"]
+impl isize {
+    int_impl! { isize, i16, usize, 16, -32768, 32767, "", "", 4, "-0x5ffd", "0x3a", "0x1234",
+    "0x3412", "0x2c48", "[0x34, 0x12]", "[0x12, 0x34]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+
+#[cfg(target_pointer_width = "32")]
+#[lang = "isize"]
+impl isize {
+    int_impl! { isize, i32, usize, 32, -2147483648, 2147483647, "", "", 8, "0x10000b3", "0xb301",
+    "0x12345678", "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+
+#[cfg(target_pointer_width = "64")]
+#[lang = "isize"]
+impl isize {
+    int_impl! { isize, i64, usize, 64, -9223372036854775808, 9223372036854775807, "", "",
+    12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412",
+    "0x6a2c48091e6a2c48", "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+
+#[lang = "u8"]
+impl u8 {
+    // Shared unsigned-integer API (constants, checked/wrapping arithmetic,
+    // byte conversions, ...) generated by the macro; the string arguments
+    // are spliced into the generated doc examples.
+    uint_impl! { u8, u8, 8, 255, "", "", 2, "0x82", "0xa", "0x12", "0x12", "0x48", "[0x12]",
+    "[0x12]", "", "" }
+
+    /// Checks if the value is within the ASCII range.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let ascii = 97u8;
+    /// let non_ascii = 150u8;
+    ///
+    /// assert!(ascii.is_ascii());
+    /// assert!(!non_ascii.is_ascii());
+    /// ```
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[rustc_const_stable(feature = "const_ascii_methods_on_intrinsics", since = "1.43.0")]
+    #[inline]
+    pub const fn is_ascii(&self) -> bool {
+        // ASCII is exactly the bytes with the high bit clear (0..=127).
+        *self & 128 == 0
+    }
+
+    /// Makes a copy of the value in its ASCII upper case equivalent.
+    ///
+    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+    /// but non-ASCII letters are unchanged.
+    ///
+    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let lowercase_a = 97u8;
+    ///
+    /// assert_eq!(65, lowercase_a.to_ascii_uppercase());
+    /// ```
+    ///
+    /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[inline]
+    pub fn to_ascii_uppercase(&self) -> u8 {
+        // Unset the fifth bit if this is a lowercase letter
+        // (bit 5, value 0x20, is the only difference between the ASCII
+        // upper- and lowercase letter ranges; the mask is a no-op for
+        // every other byte, keeping this branchless).
+        *self & !((self.is_ascii_lowercase() as u8) << 5)
+    }
+
+    /// Makes a copy of the value in its ASCII lower case equivalent.
+    ///
+    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+    /// but non-ASCII letters are unchanged.
+    ///
+    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = 65u8;
+    ///
+    /// assert_eq!(97, uppercase_a.to_ascii_lowercase());
+    /// ```
+    ///
+    /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[inline]
+    pub fn to_ascii_lowercase(&self) -> u8 {
+        // Set the fifth bit if this is an uppercase letter
+        // (branchless mirror of `to_ascii_uppercase` above).
+        *self | ((self.is_ascii_uppercase() as u8) << 5)
+    }
+
+    /// Checks that two values are an ASCII case-insensitive match.
+    ///
+    /// This is equivalent to `to_ascii_lowercase(a) == to_ascii_lowercase(b)`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let lowercase_a = 97u8;
+    /// let uppercase_a = 65u8;
+    ///
+    /// assert!(lowercase_a.eq_ignore_ascii_case(&uppercase_a));
+    /// ```
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[inline]
+    pub fn eq_ignore_ascii_case(&self, other: &u8) -> bool {
+        self.to_ascii_lowercase() == other.to_ascii_lowercase()
+    }
+
+    /// Converts this value to its ASCII upper case equivalent in-place.
+    ///
+    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+    /// but non-ASCII letters are unchanged.
+    ///
+    /// To return a new uppercased value without modifying the existing one, use
+    /// [`to_ascii_uppercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut byte = b'a';
+    ///
+    /// byte.make_ascii_uppercase();
+    ///
+    /// assert_eq!(b'A', byte);
+    /// ```
+    ///
+    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[inline]
+    pub fn make_ascii_uppercase(&mut self) {
+        *self = self.to_ascii_uppercase();
+    }
+
+    /// Converts this value to its ASCII lower case equivalent in-place.
+    ///
+    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+    /// but non-ASCII letters are unchanged.
+    ///
+    /// To return a new lowercased value without modifying the existing one, use
+    /// [`to_ascii_lowercase`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut byte = b'A';
+    ///
+    /// byte.make_ascii_lowercase();
+    ///
+    /// assert_eq!(b'a', byte);
+    /// ```
+    ///
+    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+    #[inline]
+    pub fn make_ascii_lowercase(&mut self) {
+        *self = self.to_ascii_lowercase();
+    }
+
+    /// Checks if the value is an ASCII alphabetic character:
+    ///
+    /// - U+0041 'A' ..= U+005A 'Z', or
+    /// - U+0061 'a' ..= U+007A 'z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(uppercase_a.is_ascii_alphabetic());
+    /// assert!(uppercase_g.is_ascii_alphabetic());
+    /// assert!(a.is_ascii_alphabetic());
+    /// assert!(g.is_ascii_alphabetic());
+    /// assert!(!zero.is_ascii_alphabetic());
+    /// assert!(!percent.is_ascii_alphabetic());
+    /// assert!(!space.is_ascii_alphabetic());
+    /// assert!(!lf.is_ascii_alphabetic());
+    /// assert!(!esc.is_ascii_alphabetic());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_alphabetic(&self) -> bool {
+        matches!(*self, b'A'..=b'Z' | b'a'..=b'z')
+    }
+
+    /// Checks if the value is an ASCII uppercase character:
+    /// U+0041 'A' ..= U+005A 'Z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(uppercase_a.is_ascii_uppercase());
+    /// assert!(uppercase_g.is_ascii_uppercase());
+    /// assert!(!a.is_ascii_uppercase());
+    /// assert!(!g.is_ascii_uppercase());
+    /// assert!(!zero.is_ascii_uppercase());
+    /// assert!(!percent.is_ascii_uppercase());
+    /// assert!(!space.is_ascii_uppercase());
+    /// assert!(!lf.is_ascii_uppercase());
+    /// assert!(!esc.is_ascii_uppercase());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_uppercase(&self) -> bool {
+        matches!(*self, b'A'..=b'Z')
+    }
+
+    /// Checks if the value is an ASCII lowercase character:
+    /// U+0061 'a' ..= U+007A 'z'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(!uppercase_a.is_ascii_lowercase());
+    /// assert!(!uppercase_g.is_ascii_lowercase());
+    /// assert!(a.is_ascii_lowercase());
+    /// assert!(g.is_ascii_lowercase());
+    /// assert!(!zero.is_ascii_lowercase());
+    /// assert!(!percent.is_ascii_lowercase());
+    /// assert!(!space.is_ascii_lowercase());
+    /// assert!(!lf.is_ascii_lowercase());
+    /// assert!(!esc.is_ascii_lowercase());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_lowercase(&self) -> bool {
+        matches!(*self, b'a'..=b'z')
+    }
+
+    /// Checks if the value is an ASCII alphanumeric character:
+    ///
+    /// - U+0041 'A' ..= U+005A 'Z', or
+    /// - U+0061 'a' ..= U+007A 'z', or
+    /// - U+0030 '0' ..= U+0039 '9'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(uppercase_a.is_ascii_alphanumeric());
+    /// assert!(uppercase_g.is_ascii_alphanumeric());
+    /// assert!(a.is_ascii_alphanumeric());
+    /// assert!(g.is_ascii_alphanumeric());
+    /// assert!(zero.is_ascii_alphanumeric());
+    /// assert!(!percent.is_ascii_alphanumeric());
+    /// assert!(!space.is_ascii_alphanumeric());
+    /// assert!(!lf.is_ascii_alphanumeric());
+    /// assert!(!esc.is_ascii_alphanumeric());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_alphanumeric(&self) -> bool {
+        matches!(*self, b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z')
+    }
+
+    /// Checks if the value is an ASCII decimal digit:
+    /// U+0030 '0' ..= U+0039 '9'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(!uppercase_a.is_ascii_digit());
+    /// assert!(!uppercase_g.is_ascii_digit());
+    /// assert!(!a.is_ascii_digit());
+    /// assert!(!g.is_ascii_digit());
+    /// assert!(zero.is_ascii_digit());
+    /// assert!(!percent.is_ascii_digit());
+    /// assert!(!space.is_ascii_digit());
+    /// assert!(!lf.is_ascii_digit());
+    /// assert!(!esc.is_ascii_digit());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_digit(&self) -> bool {
+        matches!(*self, b'0'..=b'9')
+    }
+
+    /// Checks if the value is an ASCII hexadecimal digit:
+    ///
+    /// - U+0030 '0' ..= U+0039 '9', or
+    /// - U+0041 'A' ..= U+0046 'F', or
+    /// - U+0061 'a' ..= U+0066 'f'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(uppercase_a.is_ascii_hexdigit());
+    /// assert!(!uppercase_g.is_ascii_hexdigit());
+    /// assert!(a.is_ascii_hexdigit());
+    /// assert!(!g.is_ascii_hexdigit());
+    /// assert!(zero.is_ascii_hexdigit());
+    /// assert!(!percent.is_ascii_hexdigit());
+    /// assert!(!space.is_ascii_hexdigit());
+    /// assert!(!lf.is_ascii_hexdigit());
+    /// assert!(!esc.is_ascii_hexdigit());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_hexdigit(&self) -> bool {
+        matches!(*self, b'0'..=b'9' | b'A'..=b'F' | b'a'..=b'f')
+    }
+
+    /// Checks if the value is an ASCII punctuation character:
+    ///
+    /// - U+0021 ..= U+002F `! " # $ % & ' ( ) * + , - . /`, or
+    /// - U+003A ..= U+0040 `: ; < = > ? @`, or
+    /// - U+005B ..= U+0060 ``[ \ ] ^ _ ` ``, or
+    /// - U+007B ..= U+007E `{ | } ~`
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(!uppercase_a.is_ascii_punctuation());
+    /// assert!(!uppercase_g.is_ascii_punctuation());
+    /// assert!(!a.is_ascii_punctuation());
+    /// assert!(!g.is_ascii_punctuation());
+    /// assert!(!zero.is_ascii_punctuation());
+    /// assert!(percent.is_ascii_punctuation());
+    /// assert!(!space.is_ascii_punctuation());
+    /// assert!(!lf.is_ascii_punctuation());
+    /// assert!(!esc.is_ascii_punctuation());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_punctuation(&self) -> bool {
+        matches!(*self, b'!'..=b'/' | b':'..=b'@' | b'['..=b'`' | b'{'..=b'~')
+    }
+
+    /// Checks if the value is an ASCII graphic character:
+    /// U+0021 '!' ..= U+007E '~'.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(uppercase_a.is_ascii_graphic());
+    /// assert!(uppercase_g.is_ascii_graphic());
+    /// assert!(a.is_ascii_graphic());
+    /// assert!(g.is_ascii_graphic());
+    /// assert!(zero.is_ascii_graphic());
+    /// assert!(percent.is_ascii_graphic());
+    /// assert!(!space.is_ascii_graphic());
+    /// assert!(!lf.is_ascii_graphic());
+    /// assert!(!esc.is_ascii_graphic());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_graphic(&self) -> bool {
+        matches!(*self, b'!'..=b'~')
+    }
+
+    /// Checks if the value is an ASCII whitespace character:
+    /// U+0020 SPACE, U+0009 HORIZONTAL TAB, U+000A LINE FEED,
+    /// U+000C FORM FEED, or U+000D CARRIAGE RETURN.
+    ///
+    /// Rust uses the WhatWG Infra Standard's [definition of ASCII
+    /// whitespace][infra-aw]. There are several other definitions in
+    /// wide use. For instance, [the POSIX locale][pct] includes
+    /// U+000B VERTICAL TAB as well as all the above characters,
+    /// but—from the very same specification—[the default rule for
+    /// "field splitting" in the Bourne shell][bfs] considers *only*
+    /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace.
+    ///
+    /// If you are writing a program that will process an existing
+    /// file format, check what that format's definition of whitespace is
+    /// before using this function.
+    ///
+    /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace
+    /// [pct]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01
+    /// [bfs]: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(!uppercase_a.is_ascii_whitespace());
+    /// assert!(!uppercase_g.is_ascii_whitespace());
+    /// assert!(!a.is_ascii_whitespace());
+    /// assert!(!g.is_ascii_whitespace());
+    /// assert!(!zero.is_ascii_whitespace());
+    /// assert!(!percent.is_ascii_whitespace());
+    /// assert!(space.is_ascii_whitespace());
+    /// assert!(lf.is_ascii_whitespace());
+    /// assert!(!esc.is_ascii_whitespace());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_whitespace(&self) -> bool {
+        matches!(*self, b'\t' | b'\n' | b'\x0C' | b'\r' | b' ')
+    }
+
+    /// Checks if the value is an ASCII control character:
+    /// U+0000 NUL ..= U+001F UNIT SEPARATOR, or U+007F DELETE.
+    /// Note that most ASCII whitespace characters are control
+    /// characters, but SPACE is not.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let uppercase_a = b'A';
+    /// let uppercase_g = b'G';
+    /// let a = b'a';
+    /// let g = b'g';
+    /// let zero = b'0';
+    /// let percent = b'%';
+    /// let space = b' ';
+    /// let lf = b'\n';
+    /// let esc = 0x1b_u8;
+    ///
+    /// assert!(!uppercase_a.is_ascii_control());
+    /// assert!(!uppercase_g.is_ascii_control());
+    /// assert!(!a.is_ascii_control());
+    /// assert!(!g.is_ascii_control());
+    /// assert!(!zero.is_ascii_control());
+    /// assert!(!percent.is_ascii_control());
+    /// assert!(!space.is_ascii_control());
+    /// assert!(lf.is_ascii_control());
+    /// assert!(esc.is_ascii_control());
+    /// ```
+    #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")]
+    #[rustc_const_stable(feature = "const_ascii_ctype_on_intrinsics", since = "1.47.0")]
+    #[inline]
+    pub const fn is_ascii_control(&self) -> bool {
+        matches!(*self, b'\0'..=b'\x1F' | b'\x7F')
+    }
+}
+
+// Remaining unsigned `#[lang]` impls: same `uint_impl!` instantiation
+// pattern as `u8` above (type, underlying type, bit width, MAX, then
+// doc-example strings; the two trailing slots carry the pointer-size note
+// used only by `usize`). The macro definition is not in view here.
+#[lang = "u16"]
+impl u16 {
+    uint_impl! { u16, u16, 16, 65535, "", "", 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48",
+    "[0x34, 0x12]", "[0x12, 0x34]", "", "" }
+}
+
+#[lang = "u32"]
+impl u32 {
+    uint_impl! { u32, u32, 32, 4294967295, "", "", 8, "0x10000b3", "0xb301", "0x12345678",
+    "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]", "", "" }
+}
+
+#[lang = "u64"]
+impl u64 {
+    uint_impl! { u64, u64, 64, 18446744073709551615, "", "", 12, "0xaa00000000006e1", "0x6e10aa",
+    "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48",
+    "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+    "", ""}
+}
+
+#[lang = "u128"]
+impl u128 {
+    uint_impl! { u128, u128, 128, 340282366920938463463374607431768211455, "", "", 16,
+    "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012",
+    "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48",
+    "[0x12, 0x90, 0x78, 0x56, 0x34, 0x12, 0x90, 0x78, \
+    0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56, \
+    0x78, 0x90, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12]",
+    "", ""}
+}
+
+// `usize` is defined once per pointer width, mirroring the matching
+// fixed-width type and appending the target-dependent byte-length notes.
+#[cfg(target_pointer_width = "16")]
+#[lang = "usize"]
+impl usize {
+    uint_impl! { usize, u16, 16, 65535, "", "", 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48",
+    "[0x34, 0x12]", "[0x12, 0x34]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+#[cfg(target_pointer_width = "32")]
+#[lang = "usize"]
+impl usize {
+    uint_impl! { usize, u32, 32, 4294967295, "", "", 8, "0x10000b3", "0xb301", "0x12345678",
+    "0x78563412", "0x1e6a2c48", "[0x78, 0x56, 0x34, 0x12]", "[0x12, 0x34, 0x56, 0x78]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+
+#[cfg(target_pointer_width = "64")]
+#[lang = "usize"]
+impl usize {
+    uint_impl! { usize, u64, 64, 18446744073709551615, "", "", 12, "0xaa00000000006e1", "0x6e10aa",
+    "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48",
+    "[0x56, 0x34, 0x12, 0x90, 0x78, 0x56, 0x34, 0x12]",
+    "[0x12, 0x34, 0x56, 0x78, 0x90, 0x12, 0x34, 0x56]",
+    usize_isize_to_xe_bytes_doc!(), usize_isize_from_xe_bytes_doc!() }
+}
+
+/// A classification of floating point numbers.
+///
+/// This `enum` is used as the return type for [`f32::classify`] and [`f64::classify`]. See
+/// their documentation for more.
+///
+/// [`f32::classify`]: ../../std/primitive.f32.html#method.classify
+/// [`f64::classify`]: ../../std/primitive.f64.html#method.classify
+///
+/// # Examples
+///
+/// ```
+/// use std::num::FpCategory;
+///
+/// let num = 12.4_f32;
+/// let inf = f32::INFINITY;
+/// let zero = 0f32;
+/// let sub: f32 = 1.1754942e-38;
+/// let nan = f32::NAN;
+///
+/// assert_eq!(num.classify(), FpCategory::Normal);
+/// assert_eq!(inf.classify(), FpCategory::Infinite);
+/// assert_eq!(zero.classify(), FpCategory::Zero);
+/// assert_eq!(nan.classify(), FpCategory::Nan);
+/// assert_eq!(sub.classify(), FpCategory::Subnormal);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum FpCategory {
+    /// "Not a Number", often obtained as the result of invalid operations
+    /// such as `0.0 / 0.0`. (Dividing a nonzero number by zero yields an
+    /// infinity, not NaN — see `Infinite` below.)
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Nan,
+
+    /// Positive or negative infinity.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Infinite,
+
+    /// Positive or negative zero.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Zero,
+
+    /// De-normalized floating point representation (less precise than `Normal`).
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Subnormal,
+
+    /// A regular floating point number.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Normal,
+}
+
+// Private abstraction over the primitive integer types so a single generic
+// `from_str_radix` (below) can serve all of them. The checked operations
+// take the radix/digit as `u32` and convert to `Self` internally.
+#[doc(hidden)]
+trait FromStrRadixHelper: PartialOrd + Copy {
+    fn min_value() -> Self;
+    fn max_value() -> Self;
+    fn from_u32(u: u32) -> Self;
+    fn checked_mul(&self, other: u32) -> Option<Self>;
+    fn checked_sub(&self, other: u32) -> Option<Self>;
+    fn checked_add(&self, other: u32) -> Option<Self>;
+}
+
+// Implements `FromStr` (i.e. what `str::parse` uses) for every primitive
+// integer type by delegating to `from_str_radix` with base 10.
+macro_rules! from_str_radix_int_impl {
+    ($($t:ty)*) => {$(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl FromStr for $t {
+            type Err = ParseIntError;
+            fn from_str(src: &str) -> Result<Self, ParseIntError> {
+                from_str_radix(src, 10)
+            }
+        }
+    )*}
+}
+from_str_radix_int_impl! { isize i8 i16 i32 i64 i128 usize u8 u16 u32 u64 u128 }
+
+// Implements `FromStrRadixHelper` for every primitive integer type by
+// forwarding to the inherent `MIN`/`MAX` constants and checked operations.
+macro_rules! doit {
+    ($($t:ty)*) => ($(impl FromStrRadixHelper for $t {
+        #[inline]
+        fn min_value() -> Self { Self::MIN }
+        #[inline]
+        fn max_value() -> Self { Self::MAX }
+        // `u as Self` can truncate/reinterpret in general; in this file the
+        // argument is always a radix or digit (< 37, see `from_str_radix`),
+        // so the cast is lossless for every implementing type.
+        #[inline]
+        fn from_u32(u: u32) -> Self { u as Self }
+        #[inline]
+        fn checked_mul(&self, other: u32) -> Option<Self> {
+            Self::checked_mul(*self, other as Self)
+        }
+        #[inline]
+        fn checked_sub(&self, other: u32) -> Option<Self> {
+            Self::checked_sub(*self, other as Self)
+        }
+        #[inline]
+        fn checked_add(&self, other: u32) -> Option<Self> {
+            Self::checked_add(*self, other as Self)
+        }
+    })*)
+}
+doit! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize }
+
+/// Generic digit-by-digit parser shared by every primitive integer's
+/// `FromStr` and `from_str_radix` implementation.
+///
+/// Returns `Err` with the matching `IntErrorKind` for an empty string, a
+/// bare sign, an invalid digit for `radix`, or overflow in either
+/// direction.
+///
+/// # Panics
+///
+/// Panics if `radix` is not in the range `[2, 36]`.
+fn from_str_radix<T: FromStrRadixHelper>(src: &str, radix: u32) -> Result<T, ParseIntError> {
+    use self::IntErrorKind::*;
+    use self::ParseIntError as PIE;
+
+    assert!(
+        (2..=36).contains(&radix),
+        "from_str_radix_int: must lie in the range `[2, 36]` - found {}",
+        radix
+    );
+
+    if src.is_empty() {
+        return Err(PIE { kind: Empty });
+    }
+
+    // Zero is greater than MIN exactly for the signed types; unsigned types
+    // reject a leading `-` below.
+    let is_signed_ty = T::from_u32(0) > T::min_value();
+
+    // all valid digits are ascii, so we will just iterate over the utf8 bytes
+    // and cast them to chars. .to_digit() will safely return None for anything
+    // other than a valid ascii digit for the given radix, including the first-byte
+    // of multi-byte sequences
+    let src = src.as_bytes();
+
+    let (is_positive, digits) = match src[0] {
+        // A sign with nothing after it is not a number.
+        b'+' | b'-' if src[1..].is_empty() => {
+            return Err(PIE { kind: InvalidDigit });
+        }
+        b'+' => (true, &src[1..]),
+        b'-' if is_signed_ty => (false, &src[1..]),
+        _ => (true, src),
+    };
+
+    let mut result = T::from_u32(0);
+    if is_positive {
+        // Accumulate towards positive infinity: result = result * radix + digit.
+        for &c in digits {
+            let x = (c as char).to_digit(radix).ok_or(PIE { kind: InvalidDigit })?;
+            result = result.checked_mul(radix).ok_or(PIE { kind: PosOverflow })?;
+            result = result.checked_add(x).ok_or(PIE { kind: PosOverflow })?;
+        }
+    } else {
+        // Accumulate towards negative infinity: result = result * radix - digit.
+        // Building the negative value directly (instead of negating at the
+        // end) lets `T::MIN` itself parse without overflowing.
+        for &c in digits {
+            let x = (c as char).to_digit(radix).ok_or(PIE { kind: InvalidDigit })?;
+            result = result.checked_mul(radix).ok_or(PIE { kind: NegOverflow })?;
+            result = result.checked_sub(x).ok_or(PIE { kind: NegOverflow })?;
+        }
+    }
+    Ok(result)
+}
--- /dev/null
+//! Definitions of integer that is known not to equal zero.
+
+use crate::fmt;
+use crate::ops::{BitOr, BitOrAssign};
+use crate::str::FromStr;
+
+use super::from_str_radix;
+use super::{IntErrorKind, ParseIntError};
+
+// Local copy of the helper that attaches `$x` (typically built with
+// `concat!`) as a `#[doc]` attribute on the item that follows.
+macro_rules! doc_comment {
+    ($x:expr, $($tt:tt)*) => {
+        #[doc = $x]
+        $($tt)*
+    };
+}
+
+// Forwards the listed formatting traits from a NonZero wrapper to its
+// underlying primitive, so e.g. `{:x}` prints the inner value directly.
+macro_rules! impl_nonzero_fmt {
+    ( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => {
+        $(
+            #[$stability]
+            impl fmt::$Trait for $Ty {
+                #[inline]
+                fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                    self.get().fmt(f)
+                }
+            }
+        )+
+    }
+}
+
+// Generates one `NonZero*` wrapper type per invocation line: the struct
+// itself, checked/unchecked constructors, `get`, a `From` conversion back
+// to the primitive, bitwise-OR impls (nonzero | anything is still
+// nonzero), and formatting-trait forwarding.
+macro_rules! nonzero_integers {
+    ( $( #[$stability: meta] $Ty: ident($Int: ty); )+ ) => {
+        $(
+            doc_comment! {
+                concat!("An integer that is known not to equal zero.
+
+This enables some memory layout optimization.
+For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!($Int), "`:
+
+```rust
+use std::mem::size_of;
+assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", stringify!($Int),
+">());
+```"),
+                #[$stability]
+                #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
+                #[repr(transparent)]
+                // Declares 0 an invalid bit pattern for this type, which is
+                // what makes the `Option<NonZero*>` size optimization
+                // advertised above possible. It also means constructing the
+                // tuple struct directly requires `unsafe` — see
+                // `new_unchecked` below.
+                #[rustc_layout_scalar_valid_range_start(1)]
+                #[rustc_nonnull_optimization_guaranteed]
+                pub struct $Ty($Int);
+            }
+
+            impl $Ty {
+                /// Creates a non-zero without checking the value.
+                ///
+                /// # Safety
+                ///
+                /// The value must not be zero.
+                #[$stability]
+                #[rustc_const_stable(feature = "nonzero", since = "1.34.0")]
+                #[inline]
+                pub const unsafe fn new_unchecked(n: $Int) -> Self {
+                    // SAFETY: this is guaranteed to be safe by the caller.
+                    unsafe { Self(n) }
+                }
+
+                /// Creates a non-zero if the given value is not zero.
+                #[$stability]
+                #[rustc_const_stable(feature = "const_nonzero_int_methods", since = "1.47.0")]
+                #[inline]
+                pub const fn new(n: $Int) -> Option<Self> {
+                    if n != 0 {
+                        // SAFETY: we just checked that there's no `0`
+                        Some(unsafe { Self(n) })
+                    } else {
+                        None
+                    }
+                }
+
+                /// Returns the value as a primitive type.
+                #[$stability]
+                #[inline]
+                #[rustc_const_stable(feature = "nonzero", since = "1.34.0")]
+                pub const fn get(self) -> $Int {
+                    self.0
+                }
+
+            }
+
+            #[stable(feature = "from_nonzero", since = "1.31.0")]
+            impl From<$Ty> for $Int {
+                doc_comment! {
+                    concat!(
+"Converts a `", stringify!($Ty), "` into an `", stringify!($Int), "`"),
+                    #[inline]
+                    fn from(nonzero: $Ty) -> Self {
+                        nonzero.0
+                    }
+                }
+            }
+
+            #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+            impl BitOr for $Ty {
+                type Output = Self;
+                #[inline]
+                fn bitor(self, rhs: Self) -> Self::Output {
+                    // SAFETY: since `self` and `rhs` are both nonzero, the
+                    // result of the bitwise-or will be nonzero.
+                    unsafe { $Ty::new_unchecked(self.get() | rhs.get()) }
+                }
+            }
+
+            #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+            impl BitOr<$Int> for $Ty {
+                type Output = Self;
+                #[inline]
+                fn bitor(self, rhs: $Int) -> Self::Output {
+                    // SAFETY: since `self` is nonzero, the result of the
+                    // bitwise-or will be nonzero regardless of the value of
+                    // `rhs`.
+                    unsafe { $Ty::new_unchecked(self.get() | rhs) }
+                }
+            }
+
+            #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+            impl BitOr<$Ty> for $Int {
+                type Output = $Ty;
+                #[inline]
+                fn bitor(self, rhs: $Ty) -> Self::Output {
+                    // SAFETY: since `rhs` is nonzero, the result of the
+                    // bitwise-or will be nonzero regardless of the value of
+                    // `self`.
+                    unsafe { $Ty::new_unchecked(self | rhs.get()) }
+                }
+            }
+
+            #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+            impl BitOrAssign for $Ty {
+                #[inline]
+                fn bitor_assign(&mut self, rhs: Self) {
+                    *self = *self | rhs;
+                }
+            }
+
+            #[stable(feature = "nonzero_bitor", since = "1.45.0")]
+            impl BitOrAssign<$Int> for $Ty {
+                #[inline]
+                fn bitor_assign(&mut self, rhs: $Int) {
+                    *self = *self | rhs;
+                }
+            }
+
+            impl_nonzero_fmt! {
+                #[$stability] (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty
+            }
+        )+
+    }
+}
+
+// Instantiates the twelve NonZero wrapper types, one per primitive
+// integer, each tagged with the stability of the release it shipped in.
+nonzero_integers! {
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU8(u8);
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU16(u16);
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU32(u32);
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU64(u64);
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU128(u128);
+    #[stable(feature = "nonzero", since = "1.28.0")] NonZeroUsize(usize);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI8(i8);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI16(i16);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI32(i32);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI64(i64);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI128(i128);
+    #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroIsize(isize);
+}
+
+// Implements `FromStr` for the NonZero types: parse with the underlying
+// primitive's base-10 parser, then reject a parsed value of zero with
+// `IntErrorKind::Zero`.
+macro_rules! from_str_radix_nzint_impl {
+    ($($t:ty)*) => {$(
+        #[stable(feature = "nonzero_parse", since = "1.35.0")]
+        impl FromStr for $t {
+            type Err = ParseIntError;
+            fn from_str(src: &str) -> Result<Self, Self::Err> {
+                Self::new(from_str_radix(src, 10)?)
+                    .ok_or(ParseIntError {
+                        kind: IntErrorKind::Zero
+                    })
+            }
+        }
+    )*}
+}
+
+from_str_radix_nzint_impl! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize
+NonZeroI8 NonZeroI16 NonZeroI32 NonZeroI64 NonZeroI128 NonZeroIsize }
--- /dev/null
+//! The 128-bit signed integer type.
+//!
+//! *[See also the `i128` primitive type](../../std/primitive.i128.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "i128", since = "1.26.0")]
+
+// Defines the legacy module-level `MIN`/`MAX` constants (`std::i128::MIN`
+// etc.), forwarding to the associated constants on the primitive type.
+int_module! { i128, #[stable(feature = "i128", since="1.26.0")] }
--- /dev/null
+//! The 16-bit signed integer type.
+//!
+//! *[See also the `i16` primitive type](../../std/primitive.i16.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Defines the legacy module-level `MIN`/`MAX` constants (`std::i16::MIN`
+// etc.), forwarding to the associated constants on the primitive type.
+int_module! { i16 }
--- /dev/null
+//! The 32-bit signed integer type.
+//!
+//! *[See also the `i32` primitive type](../../std/primitive.i32.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { i32 }
--- /dev/null
+//! The 64-bit signed integer type.
+//!
+//! *[See also the `i64` primitive type](../../std/primitive.i64.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { i64 }
--- /dev/null
+//! The 8-bit signed integer type.
+//!
+//! *[See also the `i8` primitive type](../../std/primitive.i8.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { i8 }
--- /dev/null
+#![doc(hidden)]
+
+// Helper macro: attaches a computed string expression (typically built with
+// `concat!`) as a `#[doc = ...]` attribute on the item that follows, allowing
+// doc text to interpolate the concrete type name.
+macro_rules! doc_comment {
+ ($x:expr, $($tt:tt)*) => {
+ #[doc = $x]
+ $($tt)*
+ };
+}
+
+// Generates the module-level `MIN`/`MAX` constants for a primitive integer
+// type, with docs steering readers toward the associated constants
+// (`<T>::MIN` / `<T>::MAX`) instead of these module paths.
+macro_rules! int_module {
+ // Default arm: constants stable since Rust 1.0.
+ ($T:ident) => (int_module!($T, #[stable(feature = "rust1", since = "1.0.0")]););
+ // Arm taking an explicit stability attribute (used by i128/u128).
+ ($T:ident, #[$attr:meta]) => (
+ doc_comment! {
+ concat!("The smallest value that can be represented by this integer type.
+Use [`", stringify!($T), "::MIN", "`](../../std/primitive.", stringify!($T), ".html#associatedconstant.MIN) instead.
+
+# Examples
+
+```rust
+// deprecated way
+let min = std::", stringify!($T), "::MIN;
+
+// intended way
+let min = ", stringify!($T), "::MIN;
+```
+"),
+ #[$attr]
+ pub const MIN: $T = $T::MIN;
+ }
+
+ doc_comment! {
+ concat!("The largest value that can be represented by this integer type.
+Use [`", stringify!($T), "::MAX", "`](../../std/primitive.", stringify!($T), ".html#associatedconstant.MAX) instead.
+
+# Examples
+
+```rust
+// deprecated way
+let max = std::", stringify!($T), "::MAX;
+
+// intended way
+let max = ", stringify!($T), "::MAX;
+```
+"),
+ #[$attr]
+ pub const MAX: $T = $T::MAX;
+ }
+ )
+}
--- /dev/null
+//! The pointer-sized signed integer type.
+//!
+//! *[See also the `isize` primitive type](../../std/primitive.isize.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { isize }
--- /dev/null
+//! The 128-bit unsigned integer type.
+//!
+//! *[See also the `u128` primitive type](../../std/primitive.u128.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "i128", since = "1.26.0")]
+
+// Generate the module-level `MIN`/`MAX` constants (see the `int_module!` macro);
+// `u128` passes an explicit stability attribute since it stabilized after 1.0.
+// NOTE: blank line above restored for consistency — every sibling module file
+// (i8..isize, u8..usize) separates the inner attribute from the invocation.
+int_module! { u128, #[stable(feature = "i128", since="1.26.0")] }
--- /dev/null
+//! The 16-bit unsigned integer type.
+//!
+//! *[See also the `u16` primitive type](../../std/primitive.u16.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { u16 }
--- /dev/null
+//! The 32-bit unsigned integer type.
+//!
+//! *[See also the `u32` primitive type](../../std/primitive.u32.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { u32 }
--- /dev/null
+//! The 64-bit unsigned integer type.
+//!
+//! *[See also the `u64` primitive type](../../std/primitive.u64.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { u64 }
--- /dev/null
+//! The 8-bit unsigned integer type.
+//!
+//! *[See also the `u8` primitive type](../../std/primitive.u8.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { u8 }
--- /dev/null
+//! The pointer-sized unsigned integer type.
+//!
+//! *[See also the `usize` primitive type](../../std/primitive.usize.html).*
+//!
+//! Although using these constants won’t cause compilation warnings,
+//! new code should use the associated constants directly on the primitive type.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+// Generate the module-level `MIN`/`MAX` constants with the default
+// rust1 stability (see the `int_module!` macro).
+int_module! { usize }
--- /dev/null
+macro_rules! uint_impl {
+ ($SelfT:ty, $ActualT:ty, $BITS:expr, $MaxV:expr, $Feature:expr, $EndFeature:expr,
+ $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr,
+ $reversed:expr, $le_bytes:expr, $be_bytes:expr,
+ $to_xe_bytes_doc:expr, $from_xe_bytes_doc:expr) => {
+ doc_comment! {
+ concat!("The smallest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MIN, 0);", $EndFeature, "
+```"),
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MIN: Self = 0;
+ }
+
+ doc_comment! {
+ concat!("The largest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MAX, ", stringify!($MaxV), ");",
+$EndFeature, "
+```"),
+ #[stable(feature = "assoc_int_consts", since = "1.43.0")]
+ pub const MAX: Self = !0;
+ }
+
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
+ doc_comment! {
+ concat!("Converts a string slice in a given base to an integer.
+
+The string is expected to be an optional `+` sign
+followed by digits.
+Leading and trailing whitespace represent an error.
+Digits are a subset of these characters, depending on `radix`:
+
+* `0-9`
+* `a-z`
+* `A-Z`
+
+# Panics
+
+This function panics if `radix` is not in the range from 2 to 36.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_str_radix(src: &str, radix: u32) -> Result<Self, ParseIntError> {
+ from_str_radix(src, radix)
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0b01001100", stringify!($SelfT), ";
+
+assert_eq!(n.count_ones(), 3);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn count_ones(self) -> u32 {
+ intrinsics::ctpop(self as $ActualT) as u32
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(", stringify!($SelfT), "::MAX.count_zeros(), 0);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn count_zeros(self) -> u32 {
+ (!self).count_ones()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of leading zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = ", stringify!($SelfT), "::MAX >> 2;
+
+assert_eq!(n.leading_zeros(), 2);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn leading_zeros(self) -> u32 {
+ intrinsics::ctlz(self as $ActualT) as u32
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of trailing zeros in the binary representation
+of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0b0101000", stringify!($SelfT), ";
+
+assert_eq!(n.trailing_zeros(), 3);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn trailing_zeros(self) -> u32 {
+ intrinsics::cttz(self) as u32
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of leading ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = !(", stringify!($SelfT), "::MAX >> 2);
+
+assert_eq!(n.leading_ones(), 2);", $EndFeature, "
+```"),
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[inline]
+ pub const fn leading_ones(self) -> u32 {
+ (!self).leading_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of trailing ones in the binary representation
+of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0b1010111", stringify!($SelfT), ";
+
+assert_eq!(n.trailing_ones(), 3);", $EndFeature, "
+```"),
+ #[stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[rustc_const_stable(feature = "leading_trailing_ones", since = "1.46.0")]
+ #[inline]
+ pub const fn trailing_ones(self) -> u32 {
+ (!self).trailing_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts the bits to the left by a specified amount, `n`,
+wrapping the truncated bits to the end of the resulting integer.
+
+Please note this isn't the same operation as the `<<` shifting operator!
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $rot_op, stringify!($SelfT), ";
+let m = ", $rot_result, ";
+
+assert_eq!(n.rotate_left(", $rot, "), m);
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ intrinsics::rotate_left(self, n as $SelfT)
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts the bits to the right by a specified amount, `n`,
+wrapping the truncated bits to the beginning of the resulting
+integer.
+
+Please note this isn't the same operation as the `>>` shifting operator!
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $rot_result, stringify!($SelfT), ";
+let m = ", $rot_op, ";
+
+assert_eq!(n.rotate_right(", $rot, "), m);
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ intrinsics::rotate_right(self, n as $SelfT)
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Reverses the byte order of the integer.
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $swap_op, stringify!($SelfT), ";
+let m = n.swap_bytes();
+
+assert_eq!(m, ", $swapped, ");
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn swap_bytes(self) -> Self {
+ intrinsics::bswap(self as $ActualT) as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Reverses the order of bits in the integer. The least significant bit becomes the most significant bit,
+ second least-significant bit becomes second most-significant bit, etc.
+
+# Examples
+
+Basic usage:
+
+```
+let n = ", $swap_op, stringify!($SelfT), ";
+let m = n.reverse_bits();
+
+assert_eq!(m, ", $reversed, ");
+assert_eq!(0, 0", stringify!($SelfT), ".reverse_bits());
+```"),
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ #[must_use]
+ pub const fn reverse_bits(self) -> Self {
+ intrinsics::bitreverse(self as $ActualT) as Self
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from big endian to the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(", stringify!($SelfT), "::from_be(n), n)
+} else {
+ assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes())
+}", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn from_be(x: Self) -> Self {
+ #[cfg(target_endian = "big")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ x.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from little endian to the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(", stringify!($SelfT), "::from_le(n), n)
+} else {
+ assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes())
+}", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn from_le(x: Self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ x
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ x.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to big endian from the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(n.to_be(), n)
+} else {
+ assert_eq!(n.to_be(), n.swap_bytes())
+}", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn to_be(self) -> Self { // or not to be?
+ #[cfg(target_endian = "big")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "big"))]
+ {
+ self.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to little endian from the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "let n = 0x1A", stringify!($SelfT), ";
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(n.to_le(), n)
+} else {
+ assert_eq!(n.to_le(), n.swap_bytes())
+}", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[inline]
+ pub const fn to_le(self) -> Self {
+ #[cfg(target_endian = "little")]
+ {
+ self
+ }
+ #[cfg(not(target_endian = "little"))]
+ {
+ self.swap_bytes()
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer addition. Computes `self + rhs`, returning `None`
+if overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(1), ",
+"Some(", stringify!($SelfT), "::MAX - 1));
+assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_add(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_add(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer addition. Computes `self + rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self + rhs > ", stringify!($SelfT),
+"::MAX` or `self + rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_add`.
+ unsafe { intrinsics::unchecked_add(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer subtraction. Computes `self - rhs`, returning
+`None` if overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(1", stringify!($SelfT), ".checked_sub(1), Some(0));
+assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_sub(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer subtraction. Computes `self - rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self - rhs > ", stringify!($SelfT),
+"::MAX` or `self - rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_sub`.
+ unsafe { intrinsics::unchecked_sub(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer multiplication. Computes `self * rhs`, returning
+`None` if overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(5", stringify!($SelfT), ".checked_mul(1), Some(5));
+assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
+ let (a, b) = self.overflowing_mul(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Unchecked integer multiplication. Computes `self * rhs`, assuming overflow
+cannot occur. This results in undefined behavior when `self * rhs > ", stringify!($SelfT),
+"::MAX` or `self * rhs < ", stringify!($SelfT), "::MIN`."),
+ #[unstable(
+ feature = "unchecked_math",
+ reason = "niche optimization path",
+ issue = "none",
+ )]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ // SAFETY: the caller must uphold the safety contract for
+ // `unchecked_mul`.
+ unsafe { intrinsics::unchecked_mul(self, rhs) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked integer division. Computes `self / rhs`, returning `None`
+if `rhs == 0`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(128", stringify!($SelfT), ".checked_div(2), Some(64));
+assert_eq!(1", stringify!($SelfT), ".checked_div(0), None);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_checked_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ // SAFETY: div by zero has been checked above and unsigned types have no other
+ // failure modes for division
+ Some(unsafe { intrinsics::unchecked_div(self, rhs) })
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked Euclidean division. Computes `self.div_euclid(rhs)`, returning `None`
+if `rhs == 0`.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(128", stringify!($SelfT), ".checked_div_euclid(2), Some(64));
+assert_eq!(1", stringify!($SelfT), ".checked_div_euclid(0), None);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ Some(self.div_euclid(rhs))
+ }
+ }
+ }
+
+
+ doc_comment! {
+ concat!("Checked integer remainder. Computes `self % rhs`, returning `None`
+if `rhs == 0`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1));
+assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_checked_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ // SAFETY: div by zero has been checked above and unsigned types have no other
+ // failure modes for division
+ Some(unsafe { intrinsics::unchecked_rem(self, rhs) })
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked Euclidean modulo. Computes `self.rem_euclid(rhs)`, returning `None`
+if `rhs == 0`.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1));
+assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
+ if unlikely!(rhs == 0) {
+ None
+ } else {
+ Some(self.rem_euclid(rhs))
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked negation. Computes `-self`, returning `None` unless `self ==
+0`.
+
+Note that negating any positive integer will overflow.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0", stringify!($SelfT), ".checked_neg(), Some(0));
+assert_eq!(1", stringify!($SelfT), ".checked_neg(), None);", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn checked_neg(self) -> Option<Self> {
+ let (a, b) = self.overflowing_neg();
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked shift left. Computes `self << rhs`, returning `None`
+if `rhs` is larger than or equal to the number of bits in `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10));
+assert_eq!(0x10", stringify!($SelfT), ".checked_shl(129), None);", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shl(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked shift right. Computes `self >> rhs`, returning `None`
+if `rhs` is larger than or equal to the number of bits in `self`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1));
+assert_eq!(0x10", stringify!($SelfT), ".checked_shr(129), None);", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
+ let (a, b) = self.overflowing_shr(rhs);
+ if unlikely!(b) {None} else {Some(a)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Checked exponentiation. Computes `self.pow(exp)`, returning `None` if
+overflow occurred.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(2", stringify!($SelfT), ".checked_pow(5), Some(32));
+assert_eq!(", stringify!($SelfT), "::MAX.checked_pow(2), None);", $EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
+ if exp == 0 {
+ return Some(1);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = try_opt!(acc.checked_mul(base));
+ }
+ exp /= 2;
+ base = try_opt!(base.checked_mul(base));
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+
+ Some(try_opt!(acc.checked_mul(base)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer addition. Computes `self + rhs`, saturating at
+the numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_add(127), ", stringify!($SelfT), "::MAX);",
+$EndFeature, "
+```"),
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn saturating_add(self, rhs: Self) -> Self {
+ intrinsics::saturating_add(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer subtraction. Computes `self - rhs`, saturating
+at the numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_sub(27), 73);
+assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[inline]
+ pub const fn saturating_sub(self, rhs: Self) -> Self {
+ intrinsics::saturating_sub(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer multiplication. Computes `self * rhs`,
+saturating at the numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(2", stringify!($SelfT), ".saturating_mul(10), 20);
+assert_eq!((", stringify!($SelfT), "::MAX).saturating_mul(10), ", stringify!($SelfT),
+"::MAX);", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_saturating_int_methods", since = "1.47.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_mul(self, rhs: Self) -> Self {
+ match self.checked_mul(rhs) {
+ Some(x) => x,
+ None => Self::MAX,
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Saturating integer exponentiation. Computes `self.pow(exp)`,
+saturating at the numeric bounds instead of overflowing.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "
+assert_eq!(4", stringify!($SelfT), ".saturating_pow(3), 64);
+assert_eq!(", stringify!($SelfT), "::MAX.saturating_pow(2), ", stringify!($SelfT), "::MAX);",
+$EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn saturating_pow(self, exp: u32) -> Self {
+ match self.checked_pow(exp) {
+ Some(x) => x,
+ None => Self::MAX,
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) addition. Computes `self + rhs`,
+wrapping around at the boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(200", stringify!($SelfT), ".wrapping_add(55), 255);
+assert_eq!(200", stringify!($SelfT), ".wrapping_add(", stringify!($SelfT), "::MAX), 199);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_add(self, rhs: Self) -> Self {
+ intrinsics::wrapping_add(self, rhs)
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) subtraction. Computes `self - rhs`,
+wrapping around at the boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_sub(100), 0);
+assert_eq!(100", stringify!($SelfT), ".wrapping_sub(", stringify!($SelfT), "::MAX), 101);",
+$EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_sub(self, rhs: Self) -> Self {
+ intrinsics::wrapping_sub(self, rhs)
+ }
+ }
+
+ /// Wrapping (modular) multiplication. Computes `self *
+ /// rhs`, wrapping around at the boundary of the type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types.
+ /// Which explains why `u8` is used here.
+ ///
+ /// ```
+ /// assert_eq!(10u8.wrapping_mul(12), 120);
+ /// assert_eq!(25u8.wrapping_mul(12), 44);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_mul(self, rhs: Self) -> Self {
+ intrinsics::wrapping_mul(self, rhs)
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) division. Computes `self / rhs`.
+Wrapped division on unsigned types is just normal division.
+There's no way wrapping could ever happen.
+This function exists, so that all operations
+are accounted for in the wrapping operations.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10);", $EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_unstable(feature = "const_wrapping_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div(self, rhs: Self) -> Self {
+ // Unsigned division can never wrap, so plain division is the whole story.
+ self / rhs
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping Euclidean division. Computes `self.div_euclid(rhs)`.
+Wrapped division on unsigned types is just normal division.
+There's no way wrapping could ever happen.
+This function exists, so that all operations
+are accounted for in the wrapping operations.
+Since, for the positive integers, all common
+definitions of division are equal, this
+is exactly equal to `self.wrapping_div(rhs)`.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_div_euclid(self, rhs: Self) -> Self {
+ // For unsigned values Euclidean division coincides with `/`.
+ self / rhs
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) remainder. Computes `self % rhs`.
+Wrapped remainder calculation on unsigned types is
+just the regular remainder calculation.
+There's no way wrapping could ever happen.
+This function exists, so that all operations
+are accounted for in the wrapping operations.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0);", $EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_unstable(feature = "const_wrapping_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem(self, rhs: Self) -> Self {
+ // Unsigned remainder can never wrap.
+ self % rhs
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping Euclidean modulo. Computes `self.rem_euclid(rhs)`.
+Wrapped modulo calculation on unsigned types is
+just the regular remainder calculation.
+There's no way wrapping could ever happen.
+This function exists, so that all operations
+are accounted for in the wrapping operations.
+Since, for the positive integers, all common
+definitions of division are equal, this
+is exactly equal to `self.wrapping_rem(rhs)`.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0);
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_rem_euclid(self, rhs: Self) -> Self {
+ // For unsigned values Euclidean modulo coincides with `%`.
+ self % rhs
+ }
+ }
+
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types.
+ /// Which explains why `i8` is used here.
+ ///
+ /// ```
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128);
+ /// ```
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_neg(self) -> Self {
+ // Delegate to overflowing_neg and discard the overflow flag.
+ self.overflowing_neg().0
+ }
+
+ doc_comment! {
+ concat!("Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+where `mask` removes any high-order bits of `rhs` that
+would cause the shift to exceed the bitwidth of the type.
+
+Note that this is *not* the same as a rotate-left; the
+RHS of a wrapping shift-left is restricted to the range
+of the type, rather than the bits shifted out of the LHS
+being returned to the other end. The primitive integer
+types all implement a [`rotate_left`](#method.rotate_left) function,
+which may be what you want instead.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(1", stringify!($SelfT), ".wrapping_shl(7), 128);
+assert_eq!(1", stringify!($SelfT), ".wrapping_shl(128), 1);", $EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_shl(self, rhs: u32) -> Self {
+ // $BITS is a power of two, so `rhs & ($BITS - 1)` reduces rhs mod the bit width.
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+where `mask` removes any high-order bits of `rhs` that
+would cause the shift to exceed the bitwidth of the type.
+
+Note that this is *not* the same as a rotate-right; the
+RHS of a wrapping shift-right is restricted to the range
+of the type, rather than the bits shifted out of the LHS
+being returned to the other end. The primitive integer
+types all implement a [`rotate_right`](#method.rotate_right) function,
+which may be what you want instead.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(128", stringify!($SelfT), ".wrapping_shr(7), 1);
+assert_eq!(128", stringify!($SelfT), ".wrapping_shr(128), 128);", $EndFeature, "
+```"),
+ #[stable(feature = "num_wrapping", since = "1.2.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_shr(self, rhs: u32) -> Self {
+ // Same masking scheme as wrapping_shl: reduce rhs mod $BITS before shifting.
+ // SAFETY: the masking by the bitsize of the type ensures that we do not shift
+ // out of bounds
+ unsafe {
+ intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Wrapping (modular) exponentiation. Computes `self.pow(exp)`,
+wrapping around at the boundary of the type.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(5), 243);
+assert_eq!(3u8.wrapping_pow(6), 217);", $EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn wrapping_pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+
+ // Exponentiation by squaring: consume exp bit-by-bit, wrapping on every multiply.
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc.wrapping_mul(base);
+ }
+ exp /= 2;
+ base = base.wrapping_mul(base);
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc.wrapping_mul(base)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates `self` + `rhs`
+
+Returns a tuple of the addition along with a boolean indicating
+whether an arithmetic overflow would occur. If an overflow would
+have occurred then the wrapped value is returned.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false));
+assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
+ // Cast through $ActualT (the concrete primitive behind Self) so the intrinsic
+ // gets a plain integer type; presumably a no-op cast here — confirm against the
+ // macro's expansion sites.
+ let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates `self` - `rhs`
+
+Returns a tuple of the subtraction along with a boolean indicating
+whether an arithmetic overflow would occur. If an overflow would
+have occurred then the wrapped value is returned.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "
+assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false));
+assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));",
+$EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
+ // Same $ActualT round-trip as overflowing_add, but for the subtraction intrinsic.
+ let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+ }
+
+ /// Calculates the multiplication of `self` and `rhs`.
+ ///
+ /// Returns a tuple of the multiplication along with a boolean
+ /// indicating whether an arithmetic overflow would occur. If an
+ /// overflow would have occurred then the wrapped value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// Please note that this example is shared between integer types.
+ /// Which explains why `u32` is used here.
+ ///
+ /// ```
+ /// assert_eq!(5u32.overflowing_mul(2), (10, false));
+ /// assert_eq!(1_000_000_000u32.overflowing_mul(10), (1410065408, true));
+ /// ```
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
+ // Cast through $ActualT (the concrete primitive behind Self) for the intrinsic.
+ let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
+ (a as Self, b)
+ }
+
+ doc_comment! {
+ concat!("Calculates the quotient when `self` is divided by `rhs`.
+
+Returns a tuple of the quotient along with a boolean indicating
+whether an arithmetic overflow would occur. Note that for unsigned
+integers overflow never occurs, so the second value is always
+`false`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));", $EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_overflowing_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) {
+ // Unsigned division cannot overflow, so the flag is unconditionally false.
+ (self / rhs, false)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the quotient of Euclidean division `self.div_euclid(rhs)`.
+
+Returns a tuple of the quotient along with a boolean indicating
+whether an arithmetic overflow would occur. Note that for unsigned
+integers overflow never occurs, so the second value is always
+`false`.
+Since, for the positive integers, all common
+definitions of division are equal, this
+is exactly equal to `self.overflowing_div(rhs)`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage
+
+```
+assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false));
+```"),
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
+ // Euclidean division equals `/` for unsigned values; it cannot overflow.
+ (self / rhs, false)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the remainder when `self` is divided by `rhs`.
+
+Returns a tuple of the remainder after dividing along with a boolean
+indicating whether an arithmetic overflow would occur. Note that for
+unsigned integers overflow never occurs, so the second value is
+always `false`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));", $EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_unstable(feature = "const_overflowing_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
+ // Unsigned remainder cannot overflow; the flag is always false.
+ (self % rhs, false)
+ }
+ }
+
+ doc_comment! {
+ concat!("Calculates the remainder `self.rem_euclid(rhs)` as if by Euclidean division.
+
+Returns a tuple of the modulo after dividing along with a boolean
+indicating whether an arithmetic overflow would occur. Note that for
+unsigned integers overflow never occurs, so the second value is
+always `false`.
+Since, for the positive integers, all common
+definitions of division are equal, this operation
+is exactly equal to `self.overflowing_rem(rhs)`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage
+
+```
+assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false));
+```"),
+ #[inline]
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
+ // Euclidean modulo equals `%` for unsigned values; it cannot overflow.
+ (self % rhs, false)
+ }
+ }
+
+ doc_comment! {
+ concat!("Negates self in an overflowing fashion.
+
+Returns `!self + 1` using wrapping operations to return the value
+that represents the negation of this unsigned value. Note that for
+positive unsigned values overflow always occurs, but negating 0 does
+not overflow.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "assert_eq!(0", stringify!($SelfT), ".overflowing_neg(), (0, false));
+assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2i32 as ", stringify!($SelfT),
+", true));", $EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ pub const fn overflowing_neg(self) -> (Self, bool) {
+ // Two's-complement negation; every non-zero value overflows.
+ ((!self).wrapping_add(1), self != 0)
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts self left by `rhs` bits.
+
+Returns a tuple of the shifted version of self along with a boolean
+indicating whether the shift value was larger than or equal to the
+number of bits. If the shift value is too large, then value is
+masked (N-1) where N is the number of bits, and this value is then
+used to perform the shift.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(4), (0x10, false));
+assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(132), (0x10, true));", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+ // The flag is true exactly when wrapping_shl had to mask rhs (rhs >= $BITS).
+ (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Shifts self right by `rhs` bits.
+
+Returns a tuple of the shifted version of self along with a boolean
+indicating whether the shift value was larger than or equal to the
+number of bits. If the shift value is too large, then value is
+masked (N-1) where N is the number of bits, and this value is then
+used to perform the shift.
+
+# Examples
+
+Basic usage
+
+```
+", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false));
+assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(132), (0x1, true));", $EndFeature, "
+```"),
+ #[stable(feature = "wrapping", since = "1.7.0")]
+ #[rustc_const_stable(feature = "const_wrapping_math", since = "1.32.0")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+ // The flag is true exactly when wrapping_shr had to mask rhs (rhs >= $BITS).
+ (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+ }
+ }
+
+ doc_comment! {
+ concat!("Raises self to the power of `exp`, using exponentiation by squaring.
+
+Returns a tuple of the exponentiation along with a bool indicating
+whether an overflow happened.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(5), (243, false));
+assert_eq!(3u8.overflowing_pow(6), (217, true));", $EndFeature, "
+```"),
+ #[stable(feature = "no_panic_pow", since = "1.34.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ pub const fn overflowing_pow(self, mut exp: u32) -> (Self, bool) {
+ if exp == 0 {
+ return (1, false);
+ }
+ let mut base = self;
+ let mut acc: Self = 1;
+ let mut overflown = false;
+ // Scratch space for storing results of overflowing_mul.
+ let mut r;
+
+ // Exponentiation by squaring, accumulating any overflow seen along the way.
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ r = acc.overflowing_mul(base);
+ acc = r.0;
+ overflown |= r.1;
+ }
+ exp /= 2;
+ r = base.overflowing_mul(base);
+ base = r.0;
+ overflown |= r.1;
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ r = acc.overflowing_mul(base);
+ r.1 |= overflown;
+
+ r
+ }
+ }
+
+ doc_comment! {
+ concat!("Raises self to the power of `exp`, using exponentiation by squaring.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(2", stringify!($SelfT), ".pow(5), 32);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn pow(self, mut exp: u32) -> Self {
+ if exp == 0 {
+ return 1;
+ }
+ let mut base = self;
+ let mut acc = 1;
+
+ // Exponentiation by squaring; plain `*` so overflow behavior follows the
+ // caller's build profile via rustc_inherit_overflow_checks.
+ while exp > 1 {
+ if (exp & 1) == 1 {
+ acc = acc * base;
+ }
+ exp /= 2;
+ base = base * base;
+ }
+
+ // since exp!=0, finally the exp must be 1.
+ // Deal with the final bit of the exponent separately, since
+ // squaring the base afterwards is not necessary and may cause a
+ // needless overflow.
+ acc * base
+ }
+ }
+
+ doc_comment! {
+ concat!("Performs Euclidean division.
+
+Since, for the positive integers, all common
+definitions of division are equal, this
+is exactly equal to `self / rhs`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(7", stringify!($SelfT), ".div_euclid(4), 1); // or any other integer type
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn div_euclid(self, rhs: Self) -> Self {
+ // For unsigned values Euclidean division is ordinary division.
+ self / rhs
+ }
+ }
+
+
+ doc_comment! {
+ concat!("Calculates the least remainder of `self (mod rhs)`.
+
+Since, for the positive integers, all common
+definitions of division are equal, this
+is exactly equal to `self % rhs`.
+
+# Panics
+
+This function will panic if `rhs` is 0.
+
+# Examples
+
+Basic usage:
+
+```
+assert_eq!(7", stringify!($SelfT), ".rem_euclid(4), 3); // or any other integer type
+```"),
+ #[stable(feature = "euclidean_division", since = "1.38.0")]
+ #[rustc_const_unstable(feature = "const_euclidean_int_methods", issue = "53718")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn rem_euclid(self, rhs: Self) -> Self {
+ // For unsigned values the least non-negative remainder is plain `%`.
+ self % rhs
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if and only if `self == 2^k` for some `k`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert!(16", stringify!($SelfT), ".is_power_of_two());
+assert!(!10", stringify!($SelfT), ".is_power_of_two());", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_is_power_of_two", since = "1.32.0")]
+ #[inline]
+ pub const fn is_power_of_two(self) -> bool {
+ // A power of two has exactly one bit set (this also rejects 0).
+ self.count_ones() == 1
+ }
+ }
+
+ // Returns one less than next power of two.
+ // (For 8u8 next power of two is 8u8 and for 6u8 it is 8u8)
+ //
+ // 8u8.one_less_than_next_power_of_two() == 7
+ // 6u8.one_less_than_next_power_of_two() == 7
+ //
+ // This method cannot overflow, as in the `next_power_of_two`
+ // overflow cases it instead ends up returning the maximum value
+ // of the type, and can return 0 for 0.
+ #[inline]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ const fn one_less_than_next_power_of_two(self) -> Self {
+ if self <= 1 { return 0; }
+
+ let p = self - 1;
+ // SAFETY: Because `p > 0`, it cannot consist entirely of leading zeros.
+ // That means the shift is always in-bounds, and some processors
+ // (such as intel pre-haswell) have more efficient ctlz
+ // intrinsics when the argument is non-zero.
+ let z = unsafe { intrinsics::ctlz_nonzero(p) };
+ <$SelfT>::MAX >> z
+ }
+
+ doc_comment! {
+ concat!("Returns the smallest power of two greater than or equal to `self`.
+
+When return value overflows (i.e., `self > (1 << (N-1))` for type
+`uN`), it panics in debug mode and return value is wrapped to 0 in
+release mode (the only situation in which method can return 0).
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(2", stringify!($SelfT), ".next_power_of_two(), 2);
+assert_eq!(3", stringify!($SelfT), ".next_power_of_two(), 4);", $EndFeature, "
+```"),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ pub const fn next_power_of_two(self) -> Self {
+ // The helper yields 2^k - 1; the `+ 1` is where the documented overflow lives.
+ self.one_less_than_next_power_of_two() + 1
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the smallest power of two greater than or equal to `n`. If
+the next power of two is greater than the type's maximum value,
+`None` is returned, otherwise the power of two is wrapped in `Some`.
+
+# Examples
+
+Basic usage:
+
+```
+", $Feature, "assert_eq!(2", stringify!($SelfT),
+".checked_next_power_of_two(), Some(2));
+assert_eq!(3", stringify!($SelfT), ".checked_next_power_of_two(), Some(4));
+assert_eq!(", stringify!($SelfT), "::MAX.checked_next_power_of_two(), None);",
+$EndFeature, "
+```"),
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ pub const fn checked_next_power_of_two(self) -> Option<Self> {
+ // checked_add returns None exactly when the next power of two would overflow.
+ self.one_less_than_next_power_of_two().checked_add(1)
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the smallest power of two greater than or equal to `n`. If
+the next power of two is greater than the type's maximum value,
+the return value is wrapped to `0`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_next_power_of_two)]
+", $Feature, "
+assert_eq!(2", stringify!($SelfT), ".wrapping_next_power_of_two(), 2);
+assert_eq!(3", stringify!($SelfT), ".wrapping_next_power_of_two(), 4);
+assert_eq!(", stringify!($SelfT), "::MAX.wrapping_next_power_of_two(), 0);",
+$EndFeature, "
+```"),
+ #[unstable(feature = "wrapping_next_power_of_two", issue = "32463",
+ reason = "needs decision on wrapping behaviour")]
+ #[rustc_const_unstable(feature = "const_int_pow", issue = "53718")]
+ #[inline]
+ pub const fn wrapping_next_power_of_two(self) -> Self {
+ // wrapping_add makes the overflow case (MAX -> 0) explicit and panic-free.
+ self.one_less_than_next_power_of_two().wrapping_add(1)
+ }
+ }
+
+ doc_comment! {
+ concat!("Return the memory representation of this integer as a byte array in
+big-endian (network) byte order.
+",
+$to_xe_bytes_doc,
+"
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes();
+assert_eq!(bytes, ", $be_bytes, ");
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // Byte-swap to big-endian first (no-op on BE targets), then expose native bytes.
+ self.to_be().to_ne_bytes()
+ }
+ }
+
+ doc_comment! {
+ concat!("Return the memory representation of this integer as a byte array in
+little-endian byte order.
+",
+$to_xe_bytes_doc,
+"
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes();
+assert_eq!(bytes, ", $le_bytes, ");
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // Byte-swap to little-endian first (no-op on LE targets), then expose native bytes.
+ self.to_le().to_ne_bytes()
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Return the memory representation of this integer as a byte array in
+native byte order.
+
+As the target platform's native endianness is used, portable code
+should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate,
+instead.
+",
+$to_xe_bytes_doc,
+"
+[`to_be_bytes`]: #method.to_be_bytes
+[`to_le_bytes`]: #method.to_le_bytes
+
+# Examples
+
+```
+let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes();
+assert_eq!(
+ bytes,
+ if cfg!(target_endian = \"big\") {
+ ", $be_bytes, "
+ } else {
+ ", $le_bytes, "
+ }
+);
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute them to arrays of bytes
+ #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
+ #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
+ #[inline]
+ pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ unsafe { mem::transmute(self) }
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Return the memory representation of this integer as a byte array in
+native byte order.
+
+[`to_ne_bytes`] should be preferred over this whenever possible.
+
+[`to_ne_bytes`]: #method.to_ne_bytes
+",
+
+"
+# Examples
+
+```
+#![feature(num_as_ne_bytes)]
+let num = ", $swap_op, stringify!($SelfT), ";
+let bytes = num.as_ne_bytes();
+assert_eq!(
+ bytes,
+ if cfg!(target_endian = \"big\") {
+ &", $be_bytes, "
+ } else {
+ &", $le_bytes, "
+ }
+);
+```"),
+ #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
+ #[inline]
+ pub fn as_ne_bytes(&self) -> &[u8; mem::size_of::<Self>()] {
+ // Borrowing (not copying) view of self's bytes via a same-size pointer cast.
+ // SAFETY: integers are plain old datatypes so we can always transmute them to
+ // arrays of bytes
+ unsafe { &*(self as *const Self as *const _) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Create a native endian integer value from its representation
+as a byte array in big endian.
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, ");
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // Reinterpret the bytes natively, then convert from big-endian representation.
+ Self::from_be(Self::from_ne_bytes(bytes))
+ }
+ }
+
+ doc_comment! {
+ concat!("
+Create a native endian integer value from its representation
+as a byte array in little endian.
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, ");
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ #[inline]
+ pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // Reinterpret the bytes natively, then convert from little-endian representation.
+ Self::from_le(Self::from_ne_bytes(bytes))
+ }
+ }
+
+ doc_comment! {
+ concat!("Create a native endian integer value from its memory representation
+as a byte array in native endianness.
+
+As the target platform's native endianness is used, portable code
+likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
+appropriate instead.
+
+[`from_be_bytes`]: #method.from_be_bytes
+[`from_le_bytes`]: #method.from_le_bytes
+",
+$from_xe_bytes_doc,
+"
+# Examples
+
+```
+let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") {
+ ", $be_bytes, "
+} else {
+ ", $le_bytes, "
+});
+assert_eq!(value, ", $swap_op, ");
+```
+
+When starting from a slice rather than an array, fallible conversion APIs can be used:
+
+```
+use std::convert::TryInto;
+
+fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " {
+ let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">());
+ *input = rest;
+ ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap())
+}
+```"),
+ #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
+ #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
+ // SAFETY: const sound because integers are plain old datatypes so we can always
+ // transmute to them
+ #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
+ #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
+ #[inline]
+ pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+ // SAFETY: integers are plain old datatypes so we can always transmute to them
+ unsafe { mem::transmute(bytes) }
+ }
+ }
+
+ doc_comment! {
+ concat!("**This method is soft-deprecated.**
+
+Although using it won’t cause compilation warning,
+new code should use [`", stringify!($SelfT), "::MIN", "`](#associatedconstant.MIN) instead.
+
+Returns the smallest value that can be represented by this integer type."),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_promotable]
+ #[inline(always)]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ // Kept for backward compatibility; the associated constant MIN is preferred.
+ pub const fn min_value() -> Self { Self::MIN }
+ }
+
+ doc_comment! {
+ concat!("**This method is soft-deprecated.**
+
+Although using it won’t cause compilation warning,
+new code should use [`", stringify!($SelfT), "::MAX", "`](#associatedconstant.MAX) instead.
+
+Returns the largest value that can be represented by this integer type."),
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_promotable]
+ #[inline(always)]
+ #[rustc_const_stable(feature = "const_max_value", since = "1.32.0")]
+ // Kept for backward compatibility; the associated constant MAX is preferred.
+ pub const fn max_value() -> Self { Self::MAX }
+ }
+ }
+}
--- /dev/null
+//! Definitions of `Wrapping<T>`.
+
+use crate::fmt;
+use crate::ops::{Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign};
+use crate::ops::{BitXor, BitXorAssign, Div, DivAssign};
+use crate::ops::{Mul, MulAssign, Neg, Not, Rem, RemAssign};
+use crate::ops::{Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign};
+
+/// Provides intentionally-wrapped arithmetic on `T`.
+///
+/// Operations like `+` on `u32` values are intended to never overflow,
+/// and in some debug configurations overflow is detected and results
+/// in a panic. While most arithmetic falls into this category, some
+/// code explicitly expects and relies upon modular arithmetic (e.g.,
+/// hashing).
+///
+/// Wrapping arithmetic can be achieved either through methods like
+/// `wrapping_add`, or through the `Wrapping<T>` type, which says that
+/// all standard arithmetic operations on the underlying value are
+/// intended to have wrapping semantics.
+///
+/// The underlying value can be retrieved through the `.0` index of the
+/// `Wrapping` tuple.
+///
+/// # Examples
+///
+/// ```
+/// use std::num::Wrapping;
+///
+/// let zero = Wrapping(0u32);
+/// let one = Wrapping(1u32);
+///
+/// assert_eq!(u32::MAX, (zero - one).0);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)]
+// repr(transparent) makes Wrapping<T> layout- and ABI-identical to T.
+#[repr(transparent)]
+pub struct Wrapping<T>(#[stable(feature = "rust1", since = "1.0.0")] pub T);
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug> fmt::Debug for Wrapping<T> {
+ /// Formats the wrapped value, delegating to `T`'s `Debug` impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "wrapping_display", since = "1.10.0")]
+impl<T: fmt::Display> fmt::Display for Wrapping<T> {
+ /// Formats the wrapped value, delegating to `T`'s `Display` impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::Binary> fmt::Binary for Wrapping<T> {
+ /// Formats the wrapped value in binary, delegating to `T`'s `Binary` impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Binary::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::Octal> fmt::Octal for Wrapping<T> {
+ /// Formats the wrapped value in octal, delegating to `T`'s `Octal` impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Octal::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::LowerHex> fmt::LowerHex for Wrapping<T> {
+ /// Formats the wrapped value in lowercase hex, delegating to `T`'s impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::LowerHex::fmt(&self.0, f)
+ }
+}
+
+#[stable(feature = "wrapping_fmt", since = "1.11.0")]
+impl<T: fmt::UpperHex> fmt::UpperHex for Wrapping<T> {
+ /// Formats the wrapped value in uppercase hex, delegating to `T`'s impl.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::UpperHex::fmt(&self.0, f)
+ }
+}
+
+// Implements `Shl`/`Shr` (and the assign forms) of `Wrapping<$t>` by a
+// *signed* shift amount `$f`. A negative amount shifts in the opposite
+// direction; the magnitude is masked with `shift_max::$t` (bit width - 1)
+// so the shift count never exceeds the type's width.
+#[allow(unused_macros)]
+macro_rules! sh_impl_signed {
+ ($t:ident, $f:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Shl<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shl(self, other: $f) -> Wrapping<$t> {
+ // Left shift by a negative amount is a right shift of the magnitude.
+ if other < 0 {
+ Wrapping(self.0.wrapping_shr((-other & self::shift_max::$t as $f) as u32))
+ } else {
+ Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ }
+ forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl ShlAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shl_assign(&mut self, other: $f) {
+ *self = *self << other;
+ }
+ }
+ forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Shr<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Wrapping<$t> {
+ // Right shift by a negative amount is a left shift of the magnitude.
+ if other < 0 {
+ Wrapping(self.0.wrapping_shl((-other & self::shift_max::$t as $f) as u32))
+ } else {
+ Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ }
+ forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl ShrAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+ };
+}
+
+// Implements `Shl`/`Shr` (and the assign forms) of `Wrapping<$t>` by an
+// *unsigned* shift amount `$f`. The amount is masked with `shift_max::$t`
+// (bit width - 1) so the shift count never exceeds the type's width.
+macro_rules! sh_impl_unsigned {
+ ($t:ident, $f:ident) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Shl<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shl(self, other: $f) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl ShlAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shl_assign(&mut self, other: $f) {
+ *self = *self << other;
+ }
+ }
+ forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Shr<$f> for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn shr(self, other: $f) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
+ }
+ }
+ forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
+ #[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl ShrAssign<$f> for Wrapping<$t> {
+ #[inline]
+ fn shr_assign(&mut self, other: $f) {
+ *self = *self >> other;
+ }
+ }
+ forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+ };
+}
+
+// FIXME (#23545): uncomment the remaining impls
+// Instantiates the shift impls for every integer type. Until #23545 is
+// resolved, only shifts by `usize` are provided; the other shift-amount
+// types remain commented out (which is why `sh_impl_signed` above carries
+// `#[allow(unused_macros)]`).
+macro_rules! sh_impl_all {
+ ($($t:ident)*) => ($(
+ //sh_impl_unsigned! { $t, u8 }
+ //sh_impl_unsigned! { $t, u16 }
+ //sh_impl_unsigned! { $t, u32 }
+ //sh_impl_unsigned! { $t, u64 }
+ //sh_impl_unsigned! { $t, u128 }
+ sh_impl_unsigned! { $t, usize }
+
+ //sh_impl_signed! { $t, i8 }
+ //sh_impl_signed! { $t, i16 }
+ //sh_impl_signed! { $t, i32 }
+ //sh_impl_signed! { $t, i64 }
+ //sh_impl_signed! { $t, i128 }
+ //sh_impl_signed! { $t, isize }
+ )*)
+}
+
+sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+// FIXME(30524): impl Op<T> for Wrapping<T>, impl OpAssign<T> for Wrapping<T>
+// Implements the arithmetic and bitwise operator traits for `Wrapping<$t>`.
+// Arithmetic ops delegate to the primitive `wrapping_*` methods; bitwise
+// ops cannot overflow and delegate to the plain operators. Each `OpAssign`
+// is defined in terms of the corresponding binary op.
+macro_rules! wrapping_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Add for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn add(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_add(other.0))
+ }
+ }
+ forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl AddAssign for Wrapping<$t> {
+ #[inline]
+ fn add_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self + other;
+ }
+ }
+ forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Sub for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn sub(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_sub(other.0))
+ }
+ }
+ forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl SubAssign for Wrapping<$t> {
+ #[inline]
+ fn sub_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self - other;
+ }
+ }
+ forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Mul for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn mul(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_mul(other.0))
+ }
+ }
+ forward_ref_binop! { impl Mul, mul for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl MulAssign for Wrapping<$t> {
+ #[inline]
+ fn mul_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self * other;
+ }
+ }
+ forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_div", since = "1.3.0")]
+ impl Div for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn div(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_div(other.0))
+ }
+ }
+ forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl DivAssign for Wrapping<$t> {
+ #[inline]
+ fn div_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self / other;
+ }
+ }
+ forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ impl Rem for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn rem(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_rem(other.0))
+ }
+ }
+ forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl RemAssign for Wrapping<$t> {
+ #[inline]
+ fn rem_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self % other;
+ }
+ }
+ forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Not for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn not(self) -> Wrapping<$t> {
+ Wrapping(!self.0)
+ }
+ }
+ forward_ref_unop! { impl Not, not for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl BitXor for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitxor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 ^ other.0)
+ }
+ }
+ forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl BitXorAssign for Wrapping<$t> {
+ #[inline]
+ fn bitxor_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self ^ other;
+ }
+ }
+ forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl BitOr for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitor(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 | other.0)
+ }
+ }
+ forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl BitOrAssign for Wrapping<$t> {
+ #[inline]
+ fn bitor_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self | other;
+ }
+ }
+ forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl BitAnd for Wrapping<$t> {
+ type Output = Wrapping<$t>;
+
+ #[inline]
+ fn bitand(self, other: Wrapping<$t>) -> Wrapping<$t> {
+ Wrapping(self.0 & other.0)
+ }
+ }
+ forward_ref_binop! { impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ impl BitAndAssign for Wrapping<$t> {
+ #[inline]
+ fn bitand_assign(&mut self, other: Wrapping<$t>) {
+ *self = *self & other;
+ }
+ }
+ forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> }
+
+ #[stable(feature = "wrapping_neg", since = "1.10.0")]
+ impl Neg for Wrapping<$t> {
+ type Output = Self;
+ #[inline]
+ // Negation as wrapping subtraction from zero, so `-Wrapping(MIN)`
+ // wraps back to `Wrapping(MIN)` instead of panicking.
+ fn neg(self) -> Self {
+ Wrapping(0) - self
+ }
+ }
+ forward_ref_unop! { impl Neg, neg for Wrapping<$t>,
+ #[stable(feature = "wrapping_ref", since = "1.14.0")] }
+
+ )*)
+}
+
+wrapping_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+// Inherent methods shared by `Wrapping<$t>` for every integer type:
+// MIN/MAX constants, bit queries, rotations, byte/bit reversal,
+// endianness conversions, and wrapping exponentiation. The doc text is
+// built with `concat!`/`stringify!` so each instantiation gets examples
+// mentioning its own type.
+macro_rules! wrapping_int_impl {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ doc_comment! {
+ concat!("Returns the smallest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(<Wrapping<", stringify!($t), ">>::MIN, Wrapping(", stringify!($t), "::MIN));
+```"),
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const MIN: Self = Self(<$t>::MIN);
+ }
+
+ doc_comment! {
+ concat!("Returns the largest value that can be represented by this integer type.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(<Wrapping<", stringify!($t), ">>::MAX, Wrapping(", stringify!($t), "::MAX));
+```"),
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const MAX: Self = Self(<$t>::MAX);
+ }
+
+ doc_comment! {
+ concat!("Returns the number of ones in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0b01001100", stringify!($t), ");
+
+assert_eq!(n.count_ones(), 3);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn count_ones(self) -> u32 {
+ self.0.count_ones()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(!0", stringify!($t), ").count_zeros(), 0);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn count_zeros(self) -> u32 {
+ self.0.count_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the number of trailing zeros in the binary representation
+of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0b0101000", stringify!($t), ");
+
+assert_eq!(n.trailing_zeros(), 3);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn trailing_zeros(self) -> u32 {
+ self.0.trailing_zeros()
+ }
+ }
+
+ /// Shifts the bits to the left by a specified amount, `n`,
+ /// wrapping the truncated bits to the end of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `<<` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// let n: Wrapping<i64> = Wrapping(0x0123456789ABCDEF);
+ /// let m: Wrapping<i64> = Wrapping(-0x76543210FEDCBA99);
+ ///
+ /// assert_eq!(n.rotate_left(32), m);
+ /// ```
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn rotate_left(self, n: u32) -> Self {
+ Wrapping(self.0.rotate_left(n))
+ }
+
+ /// Shifts the bits to the right by a specified amount, `n`,
+ /// wrapping the truncated bits to the beginning of the resulting
+ /// integer.
+ ///
+ /// Please note this isn't the same operation as the `>>` shifting
+ /// operator!
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// let n: Wrapping<i64> = Wrapping(0x0123456789ABCDEF);
+ /// let m: Wrapping<i64> = Wrapping(-0xFEDCBA987654322);
+ ///
+ /// assert_eq!(n.rotate_right(4), m);
+ /// ```
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn rotate_right(self, n: u32) -> Self {
+ Wrapping(self.0.rotate_right(n))
+ }
+
+ /// Reverses the byte order of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(wrapping_int_impl)]
+ /// use std::num::Wrapping;
+ ///
+ /// let n: Wrapping<i16> = Wrapping(0b00000000_01010101);
+ /// assert_eq!(n, Wrapping(85));
+ ///
+ /// let m = n.swap_bytes();
+ ///
+ /// assert_eq!(m, Wrapping(0b01010101_00000000));
+ /// assert_eq!(m, Wrapping(21760));
+ /// ```
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn swap_bytes(self) -> Self {
+ Wrapping(self.0.swap_bytes())
+ }
+
+ /// Reverses the bit pattern of the integer.
+ ///
+ /// # Examples
+ ///
+ /// Please note that this example is shared between integer types.
+ /// Which explains why `i16` is used here.
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::num::Wrapping;
+ ///
+ /// let n = Wrapping(0b00000000_01010101i16);
+ /// assert_eq!(n, Wrapping(85));
+ ///
+ /// let m = n.reverse_bits();
+ ///
+ /// assert_eq!(m.0 as u16, 0b10101010_00000000);
+ /// assert_eq!(m, Wrapping(-22016));
+ /// ```
+ #[stable(feature = "reverse_bits", since = "1.37.0")]
+ #[rustc_const_stable(feature = "const_reverse_bits", since = "1.37.0")]
+ #[inline]
+ #[must_use]
+ pub const fn reverse_bits(self) -> Self {
+ Wrapping(self.0.reverse_bits())
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from big endian to the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0x1A", stringify!($t), ");
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(<Wrapping<", stringify!($t), ">>::from_be(n), n)
+} else {
+ assert_eq!(<Wrapping<", stringify!($t), ">>::from_be(n), n.swap_bytes())
+}
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn from_be(x: Self) -> Self {
+ Wrapping(<$t>::from_be(x.0))
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts an integer from little endian to the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0x1A", stringify!($t), ");
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(<Wrapping<", stringify!($t), ">>::from_le(n), n)
+} else {
+ assert_eq!(<Wrapping<", stringify!($t), ">>::from_le(n), n.swap_bytes())
+}
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn from_le(x: Self) -> Self {
+ Wrapping(<$t>::from_le(x.0))
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to big endian from the target's endianness.
+
+On big endian this is a no-op. On little endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0x1A", stringify!($t), ");
+
+if cfg!(target_endian = \"big\") {
+ assert_eq!(n.to_be(), n)
+} else {
+ assert_eq!(n.to_be(), n.swap_bytes())
+}
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn to_be(self) -> Self {
+ Wrapping(self.0.to_be())
+ }
+ }
+
+ doc_comment! {
+ concat!("Converts `self` to little endian from the target's endianness.
+
+On little endian this is a no-op. On big endian the bytes are
+swapped.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(0x1A", stringify!($t), ");
+
+if cfg!(target_endian = \"little\") {
+ assert_eq!(n.to_le(), n)
+} else {
+ assert_eq!(n.to_le(), n.swap_bytes())
+}
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn to_le(self) -> Self {
+ Wrapping(self.0.to_le())
+ }
+ }
+
+ doc_comment! {
+ concat!("Raises self to the power of `exp`, using exponentiation by squaring.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(3", stringify!($t), ").pow(4), Wrapping(81));
+```
+
+Results that are too large are wrapped:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(3i8).pow(5), Wrapping(-13));
+assert_eq!(Wrapping(3i8).pow(6), Wrapping(-39));
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn pow(self, exp: u32) -> Self {
+ Wrapping(self.0.wrapping_pow(exp))
+ }
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+// Inherent methods specific to *signed* `Wrapping<$t>`: leading_zeros,
+// wrapping abs, signum, and sign predicates.
+macro_rules! wrapping_int_impl_signed {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ doc_comment! {
+ concat!("Returns the number of leading zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(", stringify!($t), "::MAX) >> 2;
+
+assert_eq!(n.leading_zeros(), 3);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Computes the absolute value of `self`, wrapping around at
+the boundary of the type.
+
+The only case where such wrapping can occur is when one takes the absolute value of the negative
+minimal value for the type this is a positive value that is too large to represent in the type. In
+such a case, this function returns `MIN` itself.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(100", stringify!($t), ").abs(), Wrapping(100));
+assert_eq!(Wrapping(-100", stringify!($t), ").abs(), Wrapping(100));
+assert_eq!(Wrapping(", stringify!($t), "::MIN).abs(), Wrapping(", stringify!($t), "::MIN));
+assert_eq!(Wrapping(-128i8).abs().0 as u8, 128u8);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn abs(self) -> Wrapping<$t> {
+ Wrapping(self.0.wrapping_abs())
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns a number representing sign of `self`.
+
+ - `0` if the number is zero
+ - `1` if the number is positive
+ - `-1` if the number is negative
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(10", stringify!($t), ").signum(), Wrapping(1));
+assert_eq!(Wrapping(0", stringify!($t), ").signum(), Wrapping(0));
+assert_eq!(Wrapping(-10", stringify!($t), ").signum(), Wrapping(-1));
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn signum(self) -> Wrapping<$t> {
+ Wrapping(self.0.signum())
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if `self` is positive and `false` if the number is zero or
+negative.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert!(Wrapping(10", stringify!($t), ").is_positive());
+assert!(!Wrapping(-10", stringify!($t), ").is_positive());
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn is_positive(self) -> bool {
+ self.0.is_positive()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if `self` is negative and `false` if the number is zero or
+positive.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert!(Wrapping(-10", stringify!($t), ").is_negative());
+assert!(!Wrapping(10", stringify!($t), ").is_negative());
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn is_negative(self) -> bool {
+ self.0.is_negative()
+ }
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl_signed! { isize i8 i16 i32 i64 i128 }
+
+// Inherent methods specific to *unsigned* `Wrapping<$t>`: leading_zeros,
+// power-of-two queries, and a wrapping `next_power_of_two`.
+macro_rules! wrapping_int_impl_unsigned {
+ ($($t:ty)*) => ($(
+ impl Wrapping<$t> {
+ doc_comment! {
+ concat!("Returns the number of leading zeros in the binary representation of `self`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+let n = Wrapping(", stringify!($t), "::MAX) >> 2;
+
+assert_eq!(n.leading_zeros(), 2);
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub const fn leading_zeros(self) -> u32 {
+ self.0.leading_zeros()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns `true` if and only if `self == 2^k` for some `k`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_int_impl)]
+use std::num::Wrapping;
+
+assert!(Wrapping(16", stringify!($t), ").is_power_of_two());
+assert!(!Wrapping(10", stringify!($t), ").is_power_of_two());
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_int_impl", issue = "32463")]
+ pub fn is_power_of_two(self) -> bool {
+ self.0.is_power_of_two()
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns the smallest power of two greater than or equal to `self`.
+
+When return value overflows (i.e., `self > (1 << (N-1))` for type
+`uN`), overflows to `2^N = 0`.
+
+# Examples
+
+Basic usage:
+
+```
+#![feature(wrapping_next_power_of_two)]
+use std::num::Wrapping;
+
+assert_eq!(Wrapping(2", stringify!($t), ").next_power_of_two(), Wrapping(2));
+assert_eq!(Wrapping(3", stringify!($t), ").next_power_of_two(), Wrapping(4));
+assert_eq!(Wrapping(200_u8).next_power_of_two(), Wrapping(0));
+```"),
+ #[inline]
+ #[unstable(feature = "wrapping_next_power_of_two", issue = "32463",
+ reason = "needs decision on wrapping behaviour")]
+ pub fn next_power_of_two(self) -> Self {
+ Wrapping(self.0.wrapping_next_power_of_two())
+ }
+ }
+ }
+ )*)
+}
+
+wrapping_int_impl_unsigned! { usize u8 u16 u32 u64 u128 }
+
+// Per-type shift-amount masks used by the `sh_impl_*` macros above.
+// Each constant is `bits - 1` for the type of that name (e.g. `u32` => 31),
+// so `amount & shift_max::$t` keeps a shift count within the type's width.
+// The pointer-sized masks are selected by `target_pointer_width`.
+mod shift_max {
+ #![allow(non_upper_case_globals)]
+
+ #[cfg(target_pointer_width = "16")]
+ mod platform {
+ pub const usize: u32 = super::u16;
+ pub const isize: u32 = super::i16;
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ mod platform {
+ pub const usize: u32 = super::u32;
+ pub const isize: u32 = super::i32;
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ mod platform {
+ pub const usize: u32 = super::u64;
+ pub const isize: u32 = super::i64;
+ }
+
+ pub const i8: u32 = (1 << 3) - 1;
+ pub const i16: u32 = (1 << 4) - 1;
+ pub const i32: u32 = (1 << 5) - 1;
+ pub const i64: u32 = (1 << 6) - 1;
+ pub const i128: u32 = (1 << 7) - 1;
+ pub use self::platform::isize;
+
+ // Signed and unsigned types of the same width share the same mask.
+ pub const u8: u32 = i8;
+ pub const u16: u32 = i16;
+ pub const u32: u32 = i32;
+ pub const u64: u32 = i64;
+ pub const u128: u32 = i128;
+ pub use self::platform::usize;
+}
--- /dev/null
+/// The addition operator `+`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory. For
+/// example, [`std::time::SystemTime`] implements `Add<Duration>`, which permits
+/// operations of the form `SystemTime = SystemTime + Duration`.
+///
+/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
+///
+/// # Examples
+///
+/// ## `Add`able points
+///
+/// ```
+/// use std::ops::Add;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl Add for Point {
+/// type Output = Self;
+///
+/// fn add(self, other: Self) -> Self {
+/// Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
+/// Point { x: 3, y: 3 });
+/// ```
+///
+/// ## Implementing `Add` with generics
+///
+/// Here is an example of the same `Point` struct implementing the `Add` trait
+/// using generics.
+///
+/// ```
+/// use std::ops::Add;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point<T> {
+/// x: T,
+/// y: T,
+/// }
+///
+/// // Notice that the implementation uses the associated type `Output`.
+/// impl<T: Add<Output = T>> Add for Point<T> {
+/// type Output = Self;
+///
+/// fn add(self, other: Self) -> Self::Output {
+/// Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 },
+/// Point { x: 3, y: 3 });
+/// ```
+// `#[lang = "add"]` registers this trait as the lang item backing the
+// built-in `+` operator; the diagnostics below customize the error shown
+// when no matching impl exists.
+#[lang = "add"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ on(all(_Self = "{integer}", Rhs = "{float}"), message = "cannot add a float to an integer",),
+ on(all(_Self = "{float}", Rhs = "{integer}"), message = "cannot add an integer to a float",),
+ message = "cannot add `{Rhs}` to `{Self}`",
+ label = "no implementation for `{Self} + {Rhs}`"
+)]
+#[doc(alias = "+")]
+pub trait Add<Rhs = Self> {
+ /// The resulting type after applying the `+` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `+` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 + 1, 13);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn add(self, rhs: Rhs) -> Self::Output;
+}
+
+// Implements `Add` for the primitive numeric types by forwarding to the
+// built-in `+` operator.
+macro_rules! add_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Add for $t {
+ type Output = $t;
+
+ #[inline]
+ // Overflow behavior of this `+` follows the overflow-check
+ // settings of the crate that instantiates it, not core's own.
+ #[rustc_inherit_overflow_checks]
+ fn add(self, other: $t) -> $t { self + other }
+ }
+
+ forward_ref_binop! { impl Add, add for $t, $t }
+ )*)
+}
+
+add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The subtraction operator `-`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory. For
+/// example, [`std::time::SystemTime`] implements `Sub<Duration>`, which permits
+/// operations of the form `SystemTime = SystemTime - Duration`.
+///
+/// [`std::time::SystemTime`]: ../../std/time/struct.SystemTime.html
+///
+/// # Examples
+///
+/// ## `Sub`tractable points
+///
+/// ```
+/// use std::ops::Sub;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl Sub for Point {
+/// type Output = Self;
+///
+/// fn sub(self, other: Self) -> Self::Output {
+/// Self {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 },
+/// Point { x: 1, y: 0 });
+/// ```
+///
+/// ## Implementing `Sub` with generics
+///
+/// Here is an example of the same `Point` struct implementing the `Sub` trait
+/// using generics.
+///
+/// ```
+/// use std::ops::Sub;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Point<T> {
+/// x: T,
+/// y: T,
+/// }
+///
+/// // Notice that the implementation uses the associated type `Output`.
+/// impl<T: Sub<Output = T>> Sub for Point<T> {
+/// type Output = Self;
+///
+/// fn sub(self, other: Self) -> Self::Output {
+/// Point {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// }
+/// }
+/// }
+///
+/// assert_eq!(Point { x: 2, y: 3 } - Point { x: 1, y: 0 },
+/// Point { x: 1, y: 3 });
+/// ```
+// `#[lang = "sub"]` registers this trait as the lang item backing the
+// built-in `-` operator.
+#[lang = "sub"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot subtract `{Rhs}` from `{Self}`",
+ label = "no implementation for `{Self} - {Rhs}`"
+)]
+#[doc(alias = "-")]
+pub trait Sub<Rhs = Self> {
+ /// The resulting type after applying the `-` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `-` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 - 1, 11);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn sub(self, rhs: Rhs) -> Self::Output;
+}
+
+// Implements `Sub` for the primitive numeric types by forwarding to the
+// built-in `-` operator.
+macro_rules! sub_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Sub for $t {
+ type Output = $t;
+
+ #[inline]
+ // Overflow behavior of this `-` follows the overflow-check
+ // settings of the crate that instantiates it, not core's own.
+ #[rustc_inherit_overflow_checks]
+ fn sub(self, other: $t) -> $t { self - other }
+ }
+
+ forward_ref_binop! { impl Sub, sub for $t, $t }
+ )*)
+}
+
+sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The multiplication operator `*`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// ## `Mul`tipliable rational numbers
+///
+/// ```
+/// use std::ops::Mul;
+///
+/// // By the fundamental theorem of arithmetic, rational numbers in lowest
+/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
+/// // derive `Eq` and `PartialEq`.
+/// #[derive(Debug, Eq, PartialEq)]
+/// struct Rational {
+/// numerator: usize,
+/// denominator: usize,
+/// }
+///
+/// impl Rational {
+/// fn new(numerator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
+///
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(numerator, denominator);
+/// Self {
+/// numerator: numerator / gcd,
+/// denominator: denominator / gcd,
+/// }
+/// }
+/// }
+///
+/// impl Mul for Rational {
+/// // The multiplication of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn mul(self, rhs: Self) -> Self {
+/// let numerator = self.numerator * rhs.numerator;
+/// let denominator = self.denominator * rhs.denominator;
+/// Self::new(numerator, denominator)
+/// }
+/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(x: usize, y: usize) -> usize {
+/// let mut x = x;
+/// let mut y = y;
+/// while y != 0 {
+/// let t = y;
+/// y = x % y;
+/// x = t;
+/// }
+/// x
+/// }
+///
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(2, 3) * Rational::new(3, 4),
+/// Rational::new(1, 2));
+/// ```
+///
+/// ## Multiplying vectors by scalars as in linear algebra
+///
+/// ```
+/// use std::ops::Mul;
+///
+/// struct Scalar { value: usize }
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Vector { value: Vec<usize> }
+///
+/// impl Mul<Scalar> for Vector {
+/// type Output = Self;
+///
+/// fn mul(self, rhs: Scalar) -> Self::Output {
+/// Self { value: self.value.iter().map(|v| v * rhs.value).collect() }
+/// }
+/// }
+///
+/// let vector = Vector { value: vec![2, 4, 6] };
+/// let scalar = Scalar { value: 3 };
+/// assert_eq!(vector * scalar, Vector { value: vec![6, 12, 18] });
+/// ```
+// `#[lang = "mul"]` registers this trait as the lang item backing the
+// built-in `*` operator.
+#[lang = "mul"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "cannot multiply `{Self}` by `{Rhs}`",
+ label = "no implementation for `{Self} * {Rhs}`"
+)]
+#[doc(alias = "*")]
+pub trait Mul<Rhs = Self> {
+ /// The resulting type after applying the `*` operator.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Output;
+
+ /// Performs the `*` operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// assert_eq!(12 * 2, 24);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn mul(self, rhs: Rhs) -> Self::Output;
+}
+
+macro_rules! mul_impl {
+ ($($t:ty)*) => ($(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Mul for $t {
+ type Output = $t;
+
+ #[inline]
+ #[rustc_inherit_overflow_checks]
+ fn mul(self, other: $t) -> $t { self * other }
+ }
+
+ forward_ref_binop! { impl Mul, mul for $t, $t }
+ )*)
+}
+
+mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The division operator `/`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// ## `Div`idable rational numbers
+///
+/// ```
+/// use std::ops::Div;
+///
+/// // By the fundamental theorem of arithmetic, rational numbers in lowest
+/// // terms are unique. So, by keeping `Rational`s in reduced form, we can
+/// // derive `Eq` and `PartialEq`.
+/// #[derive(Debug, Eq, PartialEq)]
+/// struct Rational {
+/// numerator: usize,
+/// denominator: usize,
+/// }
+///
+/// impl Rational {
+/// fn new(numerator: usize, denominator: usize) -> Self {
+/// if denominator == 0 {
+/// panic!("Zero is an invalid denominator!");
+/// }
+///
+/// // Reduce to lowest terms by dividing by the greatest common
+/// // divisor.
+/// let gcd = gcd(numerator, denominator);
+/// Self {
+/// numerator: numerator / gcd,
+/// denominator: denominator / gcd,
+/// }
+/// }
+/// }
+///
+/// impl Div for Rational {
+/// // The division of rational numbers is a closed operation.
+/// type Output = Self;
+///
+/// fn div(self, rhs: Self) -> Self::Output {
+/// if rhs.numerator == 0 {
+/// panic!("Cannot divide by zero-valued `Rational`!");
+/// }
+///
+/// let numerator = self.numerator * rhs.denominator;
+/// let denominator = self.denominator * rhs.numerator;
+/// Self::new(numerator, denominator)
+/// }
+/// }
+///
+/// // Euclid's two-thousand-year-old algorithm for finding the greatest common
+/// // divisor.
+/// fn gcd(x: usize, y: usize) -> usize {
+/// let mut x = x;
+/// let mut y = y;
+/// while y != 0 {
+/// let t = y;
+/// y = x % y;
+/// x = t;
+/// }
+/// x
+/// }
+///
+/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4));
+/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4),
+/// Rational::new(2, 3));
+/// ```
+///
+/// ## Dividing vectors by scalars as in linear algebra
+///
+/// ```
+/// use std::ops::Div;
+///
+/// struct Scalar { value: f32 }
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Vector { value: Vec<f32> }
+///
+/// impl Div<Scalar> for Vector {
+/// type Output = Self;
+///
+/// fn div(self, rhs: Scalar) -> Self::Output {
+/// Self { value: self.value.iter().map(|v| v / rhs.value).collect() }
+/// }
+/// }
+///
+/// let scalar = Scalar { value: 2f32 };
+/// let vector = Vector { value: vec![2f32, 4f32, 6f32] };
+/// assert_eq!(vector / scalar, Vector { value: vec![1f32, 2f32, 3f32] });
+/// ```
+// `div` is the lang item the compiler resolves the binary `/` operator to.
+#[lang = "div"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "cannot divide `{Self}` by `{Rhs}`",
+    label = "no implementation for `{Self} / {Rhs}`"
+)]
+#[doc(alias = "/")]
+pub trait Div<Rhs = Self> {
+    /// The resulting type after applying the `/` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `/` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// assert_eq!(12 / 2, 6);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn div(self, other: $t) -> Self::Output;
+}
+
+// Integer division impls carry a doc comment on each generated impl
+// documenting the round-toward-zero behavior; this is why integers and
+// floats get separate macros.
+macro_rules! div_impl_integer {
+    ($($t:ty)*) => ($(
+        /// This operation rounds towards zero, truncating any
+        /// fractional part of the exact result.
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Div for $t {
+            type Output = $t;
+
+            #[inline]
+            fn div(self, other: $t) -> $t { self / other }
+        }
+
+        forward_ref_binop! { impl Div, div for $t, $t }
+    )*)
+}
+
+div_impl_integer! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+// Same delegation for floats, without the truncation doc comment.
+macro_rules! div_impl_float {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Div for $t {
+            type Output = $t;
+
+            #[inline]
+            fn div(self, other: $t) -> $t { self / other }
+        }
+
+        forward_ref_binop! { impl Div, div for $t, $t }
+    )*)
+}
+
+div_impl_float! { f32 f64 }
+
+/// The remainder operator `%`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is
+/// implemented, one can use the `%` operator to find out what the remaining
+/// elements of the slice would be after splitting it into equal slices of a
+/// given length.
+///
+/// ```
+/// use std::ops::Rem;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SplitSlice<'a, T: 'a> {
+/// slice: &'a [T],
+/// }
+///
+/// impl<'a, T> Rem<usize> for SplitSlice<'a, T> {
+/// type Output = Self;
+///
+/// fn rem(self, modulus: usize) -> Self::Output {
+/// let len = self.slice.len();
+/// let rem = len % modulus;
+/// let start = len - rem;
+/// Self {slice: &self.slice[start..]}
+/// }
+/// }
+///
+/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3,
+/// // the remainder would be &[6, 7].
+/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3,
+/// SplitSlice { slice: &[6, 7] });
+/// ```
+// `rem` is the lang item the compiler resolves the binary `%` operator to.
+#[lang = "rem"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "cannot mod `{Self}` by `{Rhs}`",
+    label = "no implementation for `{Self} % {Rhs}`"
+)]
+#[doc(alias = "%")]
+pub trait Rem<Rhs = Self> {
+    /// The resulting type after applying the `%` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `%` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// assert_eq!(12 % 10, 2);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn rem(self, rhs: Rhs) -> Self::Output;
+}
+
+// Integer remainder: the doc comment on each generated impl pins down the
+// sign convention (`n % d == n - (n / d) * d`).
+macro_rules! rem_impl_integer {
+    ($($t:ty)*) => ($(
+        /// This operation satisfies `n % d == n - (n / d) * d`. The
+        /// result has the same sign as the left operand.
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Rem for $t {
+            type Output = $t;
+
+            #[inline]
+            fn rem(self, other: $t) -> $t { self % other }
+        }
+
+        forward_ref_binop! { impl Rem, rem for $t, $t }
+    )*)
+}
+
+rem_impl_integer! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+// Float remainder gets its own macro because the generated impls carry a
+// different doc comment (truncated-division formula plus a runnable example).
+macro_rules! rem_impl_float {
+    ($($t:ty)*) => ($(
+
+        /// The remainder from the division of two floats.
+        ///
+        /// The remainder has the same sign as the dividend and is computed as:
+        /// `x - (x / y).trunc() * y`.
+        ///
+        /// # Examples
+        /// ```
+        /// let x: f32 = 50.50;
+        /// let y: f32 = 8.125;
+        /// let remainder = x - (x / y).trunc() * y;
+        ///
+        /// // The answer to both operations is 1.75
+        /// assert_eq!(x % y, remainder);
+        /// ```
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Rem for $t {
+            type Output = $t;
+
+            #[inline]
+            fn rem(self, other: $t) -> $t { self % other }
+        }
+
+        forward_ref_binop! { impl Rem, rem for $t, $t }
+    )*)
+}
+
+rem_impl_float! { f32 f64 }
+
+/// The unary negation operator `-`.
+///
+/// # Examples
+///
+/// An implementation of `Neg` for `Sign`, which allows the use of `-` to
+/// negate its value.
+///
+/// ```
+/// use std::ops::Neg;
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Sign {
+/// Negative,
+/// Zero,
+/// Positive,
+/// }
+///
+/// impl Neg for Sign {
+/// type Output = Self;
+///
+/// fn neg(self) -> Self::Output {
+/// match self {
+/// Sign::Negative => Sign::Positive,
+/// Sign::Zero => Sign::Zero,
+/// Sign::Positive => Sign::Negative,
+/// }
+/// }
+/// }
+///
+/// // A negative positive is a negative.
+/// assert_eq!(-Sign::Positive, Sign::Negative);
+/// // A double negative is a positive.
+/// assert_eq!(-Sign::Negative, Sign::Positive);
+/// // Zero is its own negation.
+/// assert_eq!(-Sign::Zero, Sign::Zero);
+/// ```
+// `neg` is the lang item the compiler resolves the unary `-` operator to.
+// Unlike the binary operator traits, `Neg` takes no `Rhs` parameter.
+#[lang = "neg"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(alias = "-")]
+pub trait Neg {
+    /// The resulting type after applying the `-` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the unary `-` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let x: i32 = 12;
+    /// assert_eq!(-x, -12);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn neg(self) -> Self::Output;
+}
+
+// Negation is only implemented for signed integers and floats; the list
+// below deliberately omits the unsigned types.
+macro_rules! neg_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Neg for $t {
+            type Output = $t;
+
+            #[inline]
+            // Overflow behavior (e.g. negating the minimum signed value) is
+            // inherited from the crate that instantiates this impl.
+            #[rustc_inherit_overflow_checks]
+            fn neg(self) -> $t { -self }
+        }
+
+        forward_ref_unop! { impl Neg, neg for $t }
+    )*)
+}
+
+neg_impl! { isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The addition assignment operator `+=`.
+///
+/// # Examples
+///
+/// This example creates a `Point` struct that implements the `AddAssign`
+/// trait, and then demonstrates add-assigning to a mutable `Point`.
+///
+/// ```
+/// use std::ops::AddAssign;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl AddAssign for Point {
+/// fn add_assign(&mut self, other: Self) {
+/// *self = Self {
+/// x: self.x + other.x,
+/// y: self.y + other.y,
+/// };
+/// }
+/// }
+///
+/// let mut point = Point { x: 1, y: 0 };
+/// point += Point { x: 2, y: 3 };
+/// assert_eq!(point, Point { x: 3, y: 3 });
+/// ```
+// `add_assign` is the lang item the compiler resolves the `+=` operator to.
+// Note the compound-assignment traits take `&mut self` and return nothing.
+#[lang = "add_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+    message = "cannot add-assign `{Rhs}` to `{Self}`",
+    label = "no implementation for `{Self} += {Rhs}`"
+)]
+#[doc(alias = "+")]
+#[doc(alias = "+=")]
+pub trait AddAssign<Rhs = Self> {
+    /// Performs the `+=` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut x: u32 = 12;
+    /// x += 1;
+    /// assert_eq!(x, 13);
+    /// ```
+    #[stable(feature = "op_assign_traits", since = "1.8.0")]
+    fn add_assign(&mut self, rhs: Rhs);
+}
+
+// Delegates to the built-in `+=`; `forward_ref_op_assign!` (defined
+// elsewhere) derives the `T += &T` variant.
+macro_rules! add_assign_impl {
+    ($($t:ty)+) => ($(
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl AddAssign for $t {
+            #[inline]
+            #[rustc_inherit_overflow_checks]
+            fn add_assign(&mut self, other: $t) { *self += other }
+        }
+
+        forward_ref_op_assign! { impl AddAssign, add_assign for $t, $t }
+    )+)
+}
+
+add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The subtraction assignment operator `-=`.
+///
+/// # Examples
+///
+/// This example creates a `Point` struct that implements the `SubAssign`
+/// trait, and then demonstrates sub-assigning to a mutable `Point`.
+///
+/// ```
+/// use std::ops::SubAssign;
+///
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// struct Point {
+/// x: i32,
+/// y: i32,
+/// }
+///
+/// impl SubAssign for Point {
+/// fn sub_assign(&mut self, other: Self) {
+/// *self = Self {
+/// x: self.x - other.x,
+/// y: self.y - other.y,
+/// };
+/// }
+/// }
+///
+/// let mut point = Point { x: 3, y: 3 };
+/// point -= Point { x: 2, y: 3 };
+/// assert_eq!(point, Point {x: 1, y: 0});
+/// ```
+// `sub_assign` is the lang item the compiler resolves the `-=` operator to.
+#[lang = "sub_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+    message = "cannot subtract-assign `{Rhs}` from `{Self}`",
+    label = "no implementation for `{Self} -= {Rhs}`"
+)]
+#[doc(alias = "-")]
+#[doc(alias = "-=")]
+pub trait SubAssign<Rhs = Self> {
+    /// Performs the `-=` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut x: u32 = 12;
+    /// x -= 1;
+    /// assert_eq!(x, 11);
+    /// ```
+    #[stable(feature = "op_assign_traits", since = "1.8.0")]
+    fn sub_assign(&mut self, rhs: Rhs);
+}
+
+// Delegates to the built-in `-=` for every primitive numeric type.
+macro_rules! sub_assign_impl {
+    ($($t:ty)+) => ($(
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl SubAssign for $t {
+            #[inline]
+            #[rustc_inherit_overflow_checks]
+            fn sub_assign(&mut self, other: $t) { *self -= other }
+        }
+
+        forward_ref_op_assign! { impl SubAssign, sub_assign for $t, $t }
+    )+)
+}
+
+sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The multiplication assignment operator `*=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::MulAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Frequency { hertz: f64 }
+///
+/// impl MulAssign<f64> for Frequency {
+/// fn mul_assign(&mut self, rhs: f64) {
+/// self.hertz *= rhs;
+/// }
+/// }
+///
+/// let mut frequency = Frequency { hertz: 50.0 };
+/// frequency *= 4.0;
+/// assert_eq!(Frequency { hertz: 200.0 }, frequency);
+/// ```
+// `mul_assign` is the lang item the compiler resolves the `*=` operator to.
+#[lang = "mul_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+    message = "cannot multiply-assign `{Self}` by `{Rhs}`",
+    label = "no implementation for `{Self} *= {Rhs}`"
+)]
+#[doc(alias = "*")]
+#[doc(alias = "*=")]
+pub trait MulAssign<Rhs = Self> {
+    /// Performs the `*=` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut x: u32 = 12;
+    /// x *= 2;
+    /// assert_eq!(x, 24);
+    /// ```
+    #[stable(feature = "op_assign_traits", since = "1.8.0")]
+    fn mul_assign(&mut self, rhs: Rhs);
+}
+
+// Delegates to the built-in `*=` for every primitive numeric type.
+macro_rules! mul_assign_impl {
+    ($($t:ty)+) => ($(
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl MulAssign for $t {
+            #[inline]
+            #[rustc_inherit_overflow_checks]
+            fn mul_assign(&mut self, other: $t) { *self *= other }
+        }
+
+        forward_ref_op_assign! { impl MulAssign, mul_assign for $t, $t }
+    )+)
+}
+
+mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The division assignment operator `/=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::DivAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Frequency { hertz: f64 }
+///
+/// impl DivAssign<f64> for Frequency {
+/// fn div_assign(&mut self, rhs: f64) {
+/// self.hertz /= rhs;
+/// }
+/// }
+///
+/// let mut frequency = Frequency { hertz: 200.0 };
+/// frequency /= 4.0;
+/// assert_eq!(Frequency { hertz: 50.0 }, frequency);
+/// ```
+// `div_assign` is the lang item the compiler resolves the `/=` operator to.
+#[lang = "div_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+    message = "cannot divide-assign `{Self}` by `{Rhs}`",
+    label = "no implementation for `{Self} /= {Rhs}`"
+)]
+#[doc(alias = "/")]
+#[doc(alias = "/=")]
+pub trait DivAssign<Rhs = Self> {
+    /// Performs the `/=` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut x: u32 = 12;
+    /// x /= 2;
+    /// assert_eq!(x, 6);
+    /// ```
+    #[stable(feature = "op_assign_traits", since = "1.8.0")]
+    fn div_assign(&mut self, rhs: Rhs);
+}
+
+// Delegates to the built-in `/=`. Note: no `#[rustc_inherit_overflow_checks]`
+// here, consistent with the binary `Div` impls above.
+macro_rules! div_assign_impl {
+    ($($t:ty)+) => ($(
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl DivAssign for $t {
+            #[inline]
+            fn div_assign(&mut self, other: $t) { *self /= other }
+        }
+
+        forward_ref_op_assign! { impl DivAssign, div_assign for $t, $t }
+    )+)
+}
+
+div_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
+
+/// The remainder assignment operator `%=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::RemAssign;
+///
+/// struct CookieJar { cookies: u32 }
+///
+/// impl RemAssign<u32> for CookieJar {
+/// fn rem_assign(&mut self, piles: u32) {
+/// self.cookies %= piles;
+/// }
+/// }
+///
+/// let mut jar = CookieJar { cookies: 31 };
+/// let piles = 4;
+///
+/// println!("Splitting up {} cookies into {} even piles!", jar.cookies, piles);
+///
+/// jar %= piles;
+///
+/// println!("{} cookies remain in the cookie jar!", jar.cookies);
+/// ```
+// `rem_assign` is the lang item the compiler resolves the `%=` operator to.
+#[lang = "rem_assign"]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+    // Fixed: the message previously ended with a stray extra backtick
+    // ("`{Rhs}``"), which rendered diagnostics like
+    // "cannot mod-assign `i32` by `u32``".
+    message = "cannot mod-assign `{Self}` by `{Rhs}`",
+    label = "no implementation for `{Self} %= {Rhs}`"
+)]
+#[doc(alias = "%")]
+#[doc(alias = "%=")]
+pub trait RemAssign<Rhs = Self> {
+    /// Performs the `%=` operation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// let mut x: u32 = 12;
+    /// x %= 10;
+    /// assert_eq!(x, 2);
+    /// ```
+    #[stable(feature = "op_assign_traits", since = "1.8.0")]
+    fn rem_assign(&mut self, rhs: Rhs);
+}
+
+// Delegates to the built-in `%=` for every primitive numeric type.
+macro_rules! rem_assign_impl {
+    ($($t:ty)+) => ($(
+        #[stable(feature = "op_assign_traits", since = "1.8.0")]
+        impl RemAssign for $t {
+            #[inline]
+            fn rem_assign(&mut self, other: $t) { *self %= other }
+        }
+
+        forward_ref_op_assign! { impl RemAssign, rem_assign for $t, $t }
+    )+)
+}
+
+rem_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 }
--- /dev/null
+/// The unary logical negation operator `!`.
+///
+/// # Examples
+///
+/// An implementation of `Not` for `Answer`, which enables the use of `!` to
+/// invert its value.
+///
+/// ```
+/// use std::ops::Not;
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Answer {
+/// Yes,
+/// No,
+/// }
+///
+/// impl Not for Answer {
+/// type Output = Self;
+///
+/// fn not(self) -> Self::Output {
+/// match self {
+/// Answer::Yes => Answer::No,
+/// Answer::No => Answer::Yes
+/// }
+/// }
+/// }
+///
+/// assert_eq!(!Answer::Yes, Answer::No);
+/// assert_eq!(!Answer::No, Answer::Yes);
+/// ```
+// `not` is the lang item the compiler resolves the unary `!` operator to.
+#[lang = "not"]
+// Added for consistency: every other operator trait in this module carries a
+// `#[doc(alias)]` for its operator symbol; `Not` was missing one.
+#[doc(alias = "!")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Not {
+    /// The resulting type after applying the `!` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the unary `!` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(!true, false);
+    /// assert_eq!(!false, true);
+    /// assert_eq!(!1u8, 254);
+    /// assert_eq!(!0u8, 255);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn not(self) -> Self::Output;
+}
+
+// `!` is logical negation for `bool` and bitwise complement for integers;
+// both delegate to the built-in operator.
+macro_rules! not_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Not for $t {
+            type Output = $t;
+
+            #[inline]
+            fn not(self) -> $t { !self }
+        }
+
+        forward_ref_unop! { impl Not, not for $t }
+    )*)
+}
+
+not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise AND operator `&`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitAnd` for a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitAnd;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitAnd for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a & b`
+/// fn bitand(self, rhs: Self) -> Self::Output {
+/// Self(self.0 & rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false));
+/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false));
+/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitAnd` for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitAnd;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitAnd for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitand(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x & *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, false, false, false]);
+/// assert_eq!(bv1 & bv2, expected);
+/// ```
+// `bitand` is the lang item the compiler resolves the binary `&` operator to.
+#[lang = "bitand"]
+#[doc(alias = "&")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "no implementation for `{Self} & {Rhs}`",
+    label = "no implementation for `{Self} & {Rhs}`"
+)]
+pub trait BitAnd<Rhs = Self> {
+    /// The resulting type after applying the `&` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `&` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(true & false, false);
+    /// assert_eq!(true & true, true);
+    /// assert_eq!(5u8 & 1u8, 1);
+    /// assert_eq!(5u8 & 2u8, 0);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn bitand(self, rhs: Rhs) -> Self::Output;
+}
+
+// `&` is logical AND for `bool` and bitwise AND for integers; both delegate
+// to the built-in operator.
+macro_rules! bitand_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitAnd for $t {
+            type Output = $t;
+
+            #[inline]
+            fn bitand(self, rhs: $t) -> $t { self & rhs }
+        }
+
+        forward_ref_binop! { impl BitAnd, bitand for $t, $t }
+    )*)
+}
+
+bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise OR operator `|`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitOr` for a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitOr;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitOr for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a | b`
+/// fn bitor(self, rhs: Self) -> Self::Output {
+/// Self(self.0 | rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true));
+/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitOr` for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitOr;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitOr for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitor(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x | *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, true, true, false]);
+/// assert_eq!(bv1 | bv2, expected);
+/// ```
+// `bitor` is the lang item the compiler resolves the binary `|` operator to.
+#[lang = "bitor"]
+#[doc(alias = "|")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "no implementation for `{Self} | {Rhs}`",
+    label = "no implementation for `{Self} | {Rhs}`"
+)]
+pub trait BitOr<Rhs = Self> {
+    /// The resulting type after applying the `|` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `|` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(true | false, true);
+    /// assert_eq!(false | false, false);
+    /// assert_eq!(5u8 | 1u8, 5);
+    /// assert_eq!(5u8 | 2u8, 7);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn bitor(self, rhs: Rhs) -> Self::Output;
+}
+
+// `|` is logical OR for `bool` and bitwise OR for integers; both delegate
+// to the built-in operator.
+macro_rules! bitor_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitOr for $t {
+            type Output = $t;
+
+            #[inline]
+            fn bitor(self, rhs: $t) -> $t { self | rhs }
+        }
+
+        forward_ref_binop! { impl BitOr, bitor for $t, $t }
+    )*)
+}
+
+bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise XOR operator `^`.
+///
+/// Note that `Rhs` is `Self` by default, but this is not mandatory.
+///
+/// # Examples
+///
+/// An implementation of `BitXor` that lifts `^` to a wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitXor;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitXor for Scalar {
+/// type Output = Self;
+///
+/// // rhs is the "right-hand side" of the expression `a ^ b`
+/// fn bitxor(self, rhs: Self) -> Self::Output {
+/// Self(self.0 ^ rhs.0)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false));
+/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true));
+/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true));
+/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false));
+/// ```
+///
+/// An implementation of `BitXor` trait for a wrapper around `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitXor;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitXor for BooleanVector {
+/// type Output = Self;
+///
+/// fn bitxor(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// assert_eq!(lhs.len(), rhs.len());
+/// Self(
+/// lhs.iter()
+/// .zip(rhs.iter())
+/// .map(|(x, y)| *x ^ *y)
+/// .collect()
+/// )
+/// }
+/// }
+///
+/// let bv1 = BooleanVector(vec![true, true, false, false]);
+/// let bv2 = BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![false, true, true, false]);
+/// assert_eq!(bv1 ^ bv2, expected);
+/// ```
+// `bitxor` is the lang item the compiler resolves the binary `^` operator to.
+#[lang = "bitxor"]
+#[doc(alias = "^")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "no implementation for `{Self} ^ {Rhs}`",
+    label = "no implementation for `{Self} ^ {Rhs}`"
+)]
+pub trait BitXor<Rhs = Self> {
+    /// The resulting type after applying the `^` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `^` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(true ^ false, true);
+    /// assert_eq!(true ^ true, false);
+    /// assert_eq!(5u8 ^ 1u8, 4);
+    /// assert_eq!(5u8 ^ 2u8, 7);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn bitxor(self, rhs: Rhs) -> Self::Output;
+}
+
+// `^` is logical XOR for `bool` and bitwise XOR for integers; both delegate
+// to the built-in operator.
+macro_rules! bitxor_impl {
+    ($($t:ty)*) => ($(
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl BitXor for $t {
+            type Output = $t;
+
+            #[inline]
+            fn bitxor(self, other: $t) -> $t { self ^ other }
+        }
+
+        forward_ref_binop! { impl BitXor, bitxor for $t, $t }
+    )*)
+}
+
+bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The left shift operator `<<`. Note that because this trait is implemented
+/// for all integer types with multiple right-hand-side types, Rust's type
+/// checker has special handling for `_ << _`, setting the result type for
+/// integer operations to the type of the left-hand-side operand. This means
+/// that though `a << b` and `a.shl(b)` are one and the same from an evaluation
+/// standpoint, they are different when it comes to type inference.
+///
+/// # Examples
+///
+/// An implementation of `Shl` that lifts the `<<` operation on integers to a
+/// wrapper around `usize`.
+///
+/// ```
+/// use std::ops::Shl;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct Scalar(usize);
+///
+/// impl Shl<Scalar> for Scalar {
+/// type Output = Self;
+///
+/// fn shl(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// Self(lhs << rhs)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16));
+/// ```
+///
+/// An implementation of `Shl` that spins a vector leftward by a given amount.
+///
+/// ```
+/// use std::ops::Shl;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SpinVector<T: Clone> {
+/// vec: Vec<T>,
+/// }
+///
+/// impl<T: Clone> Shl<usize> for SpinVector<T> {
+/// type Output = Self;
+///
+/// fn shl(self, rhs: usize) -> Self::Output {
+/// // Rotate the vector by `rhs` places.
+/// let (a, b) = self.vec.split_at(rhs);
+/// let mut spun_vector = vec![];
+/// spun_vector.extend_from_slice(b);
+/// spun_vector.extend_from_slice(a);
+/// Self { vec: spun_vector }
+/// }
+/// }
+///
+/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2,
+/// SpinVector { vec: vec![2, 3, 4, 0, 1] });
+/// ```
+// `shl` is the lang item the compiler resolves the `<<` operator to.
+#[lang = "shl"]
+#[doc(alias = "<<")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "no implementation for `{Self} << {Rhs}`",
+    label = "no implementation for `{Self} << {Rhs}`"
+)]
+pub trait Shl<Rhs = Self> {
+    /// The resulting type after applying the `<<` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `<<` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(5u8 << 1, 10);
+    /// assert_eq!(1u8 << 1, 2);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn shl(self, rhs: Rhs) -> Self::Output;
+}
+
+// Unlike the other binary-operator macros, this one takes a single
+// (lhs, rhs) type pair, because shifts are implemented for every integer
+// right-hand-side type, not just `Self`.
+macro_rules! shl_impl {
+    ($t:ty, $f:ty) => {
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shl<$f> for $t {
+            type Output = $t;
+
+            #[inline]
+            // Overflow behavior (shift amount >= bit width) is inherited
+            // from the crate that instantiates this impl.
+            #[rustc_inherit_overflow_checks]
+            fn shl(self, other: $f) -> $t {
+                self << other
+            }
+        }
+
+        forward_ref_binop! { impl Shl, shl for $t, $f }
+    };
+}
+
+// Expands `shl_impl!` once per integer right-hand-side type, for each
+// left-hand-side type in the invocation list below. This yields the full
+// `$t << $f` matrix over all integer type pairs.
+macro_rules! shl_impl_all {
+    ($($t:ty)*) => ($(
+        shl_impl! { $t, u8 }
+        shl_impl! { $t, u16 }
+        shl_impl! { $t, u32 }
+        shl_impl! { $t, u64 }
+        shl_impl! { $t, u128 }
+        shl_impl! { $t, usize }
+
+        shl_impl! { $t, i8 }
+        shl_impl! { $t, i16 }
+        shl_impl! { $t, i32 }
+        shl_impl! { $t, i64 }
+        shl_impl! { $t, i128 }
+        shl_impl! { $t, isize }
+    )*)
+}
+
+// Type list reordered (`... i64 i128 isize`) to match `shr_impl_all!`'s
+// invocation below; the set of generated impls is unchanged.
+shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The right shift operator `>>`. Note that because this trait is implemented
+/// for all integer types with multiple right-hand-side types, Rust's type
+/// checker has special handling for `_ >> _`, setting the result type for
+/// integer operations to the type of the left-hand-side operand. This means
+/// that though `a >> b` and `a.shr(b)` are one and the same from an evaluation
+/// standpoint, they are different when it comes to type inference.
+///
+/// # Examples
+///
+/// An implementation of `Shr` that lifts the `>>` operation on integers to a
+/// wrapper around `usize`.
+///
+/// ```
+/// use std::ops::Shr;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct Scalar(usize);
+///
+/// impl Shr<Scalar> for Scalar {
+/// type Output = Self;
+///
+/// fn shr(self, Self(rhs): Self) -> Self::Output {
+/// let Self(lhs) = self;
+/// Self(lhs >> rhs)
+/// }
+/// }
+///
+/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4));
+/// ```
+///
+/// An implementation of `Shr` that spins a vector rightward by a given amount.
+///
+/// ```
+/// use std::ops::Shr;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct SpinVector<T: Clone> {
+/// vec: Vec<T>,
+/// }
+///
+/// impl<T: Clone> Shr<usize> for SpinVector<T> {
+/// type Output = Self;
+///
+/// fn shr(self, rhs: usize) -> Self::Output {
+/// // Rotate the vector by `rhs` places.
+/// let (a, b) = self.vec.split_at(self.vec.len() - rhs);
+/// let mut spun_vector = vec![];
+/// spun_vector.extend_from_slice(b);
+/// spun_vector.extend_from_slice(a);
+/// Self { vec: spun_vector }
+/// }
+/// }
+///
+/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2,
+/// SpinVector { vec: vec![3, 4, 0, 1, 2] });
+/// ```
+// `shr` is the lang item the compiler resolves the `>>` operator to.
+#[lang = "shr"]
+#[doc(alias = ">>")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+    message = "no implementation for `{Self} >> {Rhs}`",
+    label = "no implementation for `{Self} >> {Rhs}`"
+)]
+pub trait Shr<Rhs = Self> {
+    /// The resulting type after applying the `>>` operator.
+    #[stable(feature = "rust1", since = "1.0.0")]
+    type Output;
+
+    /// Performs the `>>` operation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert_eq!(5u8 >> 1, 2);
+    /// assert_eq!(2u8 >> 1, 1);
+    /// ```
+    #[must_use]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    fn shr(self, rhs: Rhs) -> Self::Output;
+}
+
+// Mirrors `shl_impl!`: one (lhs, rhs) type pair per expansion, since shifts
+// accept every integer type on the right-hand side.
+macro_rules! shr_impl {
+    ($t:ty, $f:ty) => {
+        #[stable(feature = "rust1", since = "1.0.0")]
+        impl Shr<$f> for $t {
+            type Output = $t;
+
+            #[inline]
+            // Overflow behavior (shift amount >= bit width) is inherited
+            // from the crate that instantiates this impl.
+            #[rustc_inherit_overflow_checks]
+            fn shr(self, other: $f) -> $t {
+                self >> other
+            }
+        }
+
+        forward_ref_binop! { impl Shr, shr for $t, $f }
+    };
+}
+
+// Expands `shr_impl!` once per integer right-hand-side type, for each
+// left-hand-side type in the invocation list below.
+macro_rules! shr_impl_all {
+    ($($t:ty)*) => ($(
+        shr_impl! { $t, u8 }
+        shr_impl! { $t, u16 }
+        shr_impl! { $t, u32 }
+        shr_impl! { $t, u64 }
+        shr_impl! { $t, u128 }
+        shr_impl! { $t, usize }
+
+        shr_impl! { $t, i8 }
+        shr_impl! { $t, i16 }
+        shr_impl! { $t, i32 }
+        shr_impl! { $t, i64 }
+        shr_impl! { $t, i128 }
+        shr_impl! { $t, isize }
+    )*)
+}
+
+shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The bitwise AND assignment operator `&=`.
+///
+/// # Examples
+///
+/// An implementation of `BitAndAssign` that lifts the `&=` operator to a
+/// wrapper around `bool`.
+///
+/// ```
+/// use std::ops::BitAndAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(bool);
+///
+/// impl BitAndAssign for Scalar {
+/// // rhs is the "right-hand side" of the expression `a &= b`
+/// fn bitand_assign(&mut self, rhs: Self) {
+/// *self = Self(self.0 & rhs.0)
+/// }
+/// }
+///
+/// let mut scalar = Scalar(true);
+/// scalar &= Scalar(true);
+/// assert_eq!(scalar, Scalar(true));
+///
+/// let mut scalar = Scalar(true);
+/// scalar &= Scalar(false);
+/// assert_eq!(scalar, Scalar(false));
+///
+/// let mut scalar = Scalar(false);
+/// scalar &= Scalar(true);
+/// assert_eq!(scalar, Scalar(false));
+///
+/// let mut scalar = Scalar(false);
+/// scalar &= Scalar(false);
+/// assert_eq!(scalar, Scalar(false));
+/// ```
+///
+/// Here, the `BitAndAssign` trait is implemented for a wrapper around
+/// `Vec<bool>`.
+///
+/// ```
+/// use std::ops::BitAndAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct BooleanVector(Vec<bool>);
+///
+/// impl BitAndAssign for BooleanVector {
+/// // `rhs` is the "right-hand side" of the expression `a &= b`.
+/// fn bitand_assign(&mut self, rhs: Self) {
+/// assert_eq!(self.0.len(), rhs.0.len());
+/// *self = Self(
+/// self.0
+/// .iter()
+/// .zip(rhs.0.iter())
+/// .map(|(x, y)| *x & *y)
+/// .collect()
+/// );
+/// }
+/// }
+///
+/// let mut bv = BooleanVector(vec![true, true, false, false]);
+/// bv &= BooleanVector(vec![true, false, true, false]);
+/// let expected = BooleanVector(vec![true, false, false, false]);
+/// assert_eq!(bv, expected);
+/// ```
+#[lang = "bitand_assign"]
+#[doc(alias = "&=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} &= {Rhs}`",
+ label = "no implementation for `{Self} &= {Rhs}`"
+)]
+pub trait BitAndAssign<Rhs = Self> {
+ /// Performs the `&=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x &= false;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x = true;
+ /// x &= true;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x: u8 = 5;
+ /// x &= 1;
+ /// assert_eq!(x, 1);
+ ///
+ /// let mut x: u8 = 5;
+ /// x &= 2;
+ /// assert_eq!(x, 0);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitand_assign(&mut self, rhs: Rhs);
+}
+
// Implements `BitAndAssign` for each listed primitive by delegating to the
// built-in `&=`; `forward_ref_op_assign!` also covers `&$t` right-hand sides.
macro_rules! bitand_assign_impl {
    ($($t:ty)+) => ($(
        #[stable(feature = "op_assign_traits", since = "1.8.0")]
        impl BitAndAssign for $t {
            #[inline]
            fn bitand_assign(&mut self, other: $t) { *self &= other }
        }

        forward_ref_op_assign! { impl BitAndAssign, bitand_assign for $t, $t }
    )+)
}

bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise OR assignment operator `|=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::BitOrAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct PersonalPreferences {
+/// likes_cats: bool,
+/// likes_dogs: bool,
+/// }
+///
+/// impl BitOrAssign for PersonalPreferences {
+/// fn bitor_assign(&mut self, rhs: Self) {
+/// self.likes_cats |= rhs.likes_cats;
+/// self.likes_dogs |= rhs.likes_dogs;
+/// }
+/// }
+///
+/// let mut prefs = PersonalPreferences { likes_cats: true, likes_dogs: false };
+/// prefs |= PersonalPreferences { likes_cats: false, likes_dogs: true };
+/// assert_eq!(prefs, PersonalPreferences { likes_cats: true, likes_dogs: true });
+/// ```
+#[lang = "bitor_assign"]
+#[doc(alias = "|=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} |= {Rhs}`",
+ label = "no implementation for `{Self} |= {Rhs}`"
+)]
+pub trait BitOrAssign<Rhs = Self> {
+ /// Performs the `|=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x |= false;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x = false;
+ /// x |= false;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x: u8 = 5;
+ /// x |= 1;
+ /// assert_eq!(x, 5);
+ ///
+ /// let mut x: u8 = 5;
+ /// x |= 2;
+ /// assert_eq!(x, 7);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitor_assign(&mut self, rhs: Rhs);
+}
+
// Implements `BitOrAssign` for each listed primitive by delegating to the
// built-in `|=`; `forward_ref_op_assign!` also covers `&$t` right-hand sides.
macro_rules! bitor_assign_impl {
    ($($t:ty)+) => ($(
        #[stable(feature = "op_assign_traits", since = "1.8.0")]
        impl BitOrAssign for $t {
            #[inline]
            fn bitor_assign(&mut self, other: $t) { *self |= other }
        }

        forward_ref_op_assign! { impl BitOrAssign, bitor_assign for $t, $t }
    )+)
}

bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The bitwise XOR assignment operator `^=`.
+///
+/// # Examples
+///
+/// ```
+/// use std::ops::BitXorAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Personality {
+/// has_soul: bool,
+/// likes_knitting: bool,
+/// }
+///
+/// impl BitXorAssign for Personality {
+/// fn bitxor_assign(&mut self, rhs: Self) {
+/// self.has_soul ^= rhs.has_soul;
+/// self.likes_knitting ^= rhs.likes_knitting;
+/// }
+/// }
+///
+/// let mut personality = Personality { has_soul: false, likes_knitting: true };
+/// personality ^= Personality { has_soul: true, likes_knitting: true };
+/// assert_eq!(personality, Personality { has_soul: true, likes_knitting: false});
+/// ```
+#[lang = "bitxor_assign"]
+#[doc(alias = "^=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} ^= {Rhs}`",
+ label = "no implementation for `{Self} ^= {Rhs}`"
+)]
+pub trait BitXorAssign<Rhs = Self> {
+ /// Performs the `^=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = true;
+ /// x ^= false;
+ /// assert_eq!(x, true);
+ ///
+ /// let mut x = true;
+ /// x ^= true;
+ /// assert_eq!(x, false);
+ ///
+ /// let mut x: u8 = 5;
+ /// x ^= 1;
+ /// assert_eq!(x, 4);
+ ///
+ /// let mut x: u8 = 5;
+ /// x ^= 2;
+ /// assert_eq!(x, 7);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn bitxor_assign(&mut self, rhs: Rhs);
+}
+
// Implements `BitXorAssign` for each listed primitive by delegating to the
// built-in `^=`; `forward_ref_op_assign!` also covers `&$t` right-hand sides.
macro_rules! bitxor_assign_impl {
    ($($t:ty)+) => ($(
        #[stable(feature = "op_assign_traits", since = "1.8.0")]
        impl BitXorAssign for $t {
            #[inline]
            fn bitxor_assign(&mut self, other: $t) { *self ^= other }
        }

        forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for $t, $t }
    )+)
}

bitxor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 }
+
+/// The left shift assignment operator `<<=`.
+///
+/// # Examples
+///
+/// An implementation of `ShlAssign` for a wrapper around `usize`.
+///
+/// ```
+/// use std::ops::ShlAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(usize);
+///
+/// impl ShlAssign<usize> for Scalar {
+/// fn shl_assign(&mut self, rhs: usize) {
+/// self.0 <<= rhs;
+/// }
+/// }
+///
+/// let mut scalar = Scalar(4);
+/// scalar <<= 2;
+/// assert_eq!(scalar, Scalar(16));
+/// ```
+#[lang = "shl_assign"]
+#[doc(alias = "<<=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} <<= {Rhs}`",
+ label = "no implementation for `{Self} <<= {Rhs}`"
+)]
+pub trait ShlAssign<Rhs = Self> {
+ /// Performs the `<<=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: u8 = 5;
+ /// x <<= 1;
+ /// assert_eq!(x, 10);
+ ///
+ /// let mut x: u8 = 1;
+ /// x <<= 1;
+ /// assert_eq!(x, 2);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn shl_assign(&mut self, rhs: Rhs);
+}
+
// Implements `ShlAssign<$f>` for the primitive `$t` via the built-in `<<=`;
// `forward_ref_op_assign!` also covers `&$f` right-hand sides.
macro_rules! shl_assign_impl {
    ($t:ty, $f:ty) => {
        #[stable(feature = "op_assign_traits", since = "1.8.0")]
        impl ShlAssign<$f> for $t {
            #[inline]
            // Shift-overflow checking follows the calling crate's
            // overflow-check setting rather than core's.
            #[rustc_inherit_overflow_checks]
            fn shl_assign(&mut self, other: $f) {
                *self <<= other
            }
        }

        forward_ref_op_assign! { impl ShlAssign, shl_assign for $t, $f }
    };
}
+
// Expands `shl_assign_impl!` once per possible right-hand-side integer type,
// mirroring `shl_impl_all!` for the non-assigning operator.
macro_rules! shl_assign_impl_all {
    ($($t:ty)*) => ($(
        shl_assign_impl! { $t, u8 }
        shl_assign_impl! { $t, u16 }
        shl_assign_impl! { $t, u32 }
        shl_assign_impl! { $t, u64 }
        shl_assign_impl! { $t, u128 }
        shl_assign_impl! { $t, usize }

        shl_assign_impl! { $t, i8 }
        shl_assign_impl! { $t, i16 }
        shl_assign_impl! { $t, i32 }
        shl_assign_impl! { $t, i64 }
        shl_assign_impl! { $t, i128 }
        shl_assign_impl! { $t, isize }
    )*)
}

shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
+
+/// The right shift assignment operator `>>=`.
+///
+/// # Examples
+///
+/// An implementation of `ShrAssign` for a wrapper around `usize`.
+///
+/// ```
+/// use std::ops::ShrAssign;
+///
+/// #[derive(Debug, PartialEq)]
+/// struct Scalar(usize);
+///
+/// impl ShrAssign<usize> for Scalar {
+/// fn shr_assign(&mut self, rhs: usize) {
+/// self.0 >>= rhs;
+/// }
+/// }
+///
+/// let mut scalar = Scalar(16);
+/// scalar >>= 2;
+/// assert_eq!(scalar, Scalar(4));
+/// ```
+#[lang = "shr_assign"]
+#[doc(alias = ">>=")]
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+#[rustc_on_unimplemented(
+ message = "no implementation for `{Self} >>= {Rhs}`",
+ label = "no implementation for `{Self} >>= {Rhs}`"
+)]
+pub trait ShrAssign<Rhs = Self> {
+ /// Performs the `>>=` operation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: u8 = 5;
+ /// x >>= 1;
+ /// assert_eq!(x, 2);
+ ///
+ /// let mut x: u8 = 2;
+ /// x >>= 1;
+ /// assert_eq!(x, 1);
+ /// ```
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
+ fn shr_assign(&mut self, rhs: Rhs);
+}
+
// Implements `ShrAssign<$f>` for the primitive `$t` via the built-in `>>=`;
// `forward_ref_op_assign!` also covers `&$f` right-hand sides.
macro_rules! shr_assign_impl {
    ($t:ty, $f:ty) => {
        #[stable(feature = "op_assign_traits", since = "1.8.0")]
        impl ShrAssign<$f> for $t {
            #[inline]
            // Shift-overflow checking follows the calling crate's
            // overflow-check setting rather than core's.
            #[rustc_inherit_overflow_checks]
            fn shr_assign(&mut self, other: $f) {
                *self >>= other
            }
        }

        forward_ref_op_assign! { impl ShrAssign, shr_assign for $t, $f }
    };
}
+
// Expands `shr_assign_impl!` once per possible right-hand-side integer type,
// mirroring `shr_impl_all!` for the non-assigning operator.
macro_rules! shr_assign_impl_all {
    ($($t:ty)*) => ($(
        shr_assign_impl! { $t, u8 }
        shr_assign_impl! { $t, u16 }
        shr_assign_impl! { $t, u32 }
        shr_assign_impl! { $t, u64 }
        shr_assign_impl! { $t, u128 }
        shr_assign_impl! { $t, usize }

        shr_assign_impl! { $t, i8 }
        shr_assign_impl! { $t, i16 }
        shr_assign_impl! { $t, i32 }
        shr_assign_impl! { $t, i64 }
        shr_assign_impl! { $t, i128 }
        shr_assign_impl! { $t, isize }
    )*)
}

shr_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
--- /dev/null
+use crate::ops::Try;
+
/// Used to make try_fold closures more like normal loops
#[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
#[derive(Debug, Clone, Copy, PartialEq)]
// Type-parameter order: `B` is the break (short-circuit) payload, `C` the
// continue payload, which defaults to `()` for loop-like uses.
pub enum ControlFlow<B, C = ()> {
    /// Continue in the loop, using the given value for the next iteration
    Continue(C),
    /// Exit the loop, yielding the given value
    Break(B),
}
+
+#[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+impl<B, C> Try for ControlFlow<B, C> {
+ type Ok = C;
+ type Error = B;
+ #[inline]
+ fn into_result(self) -> Result<Self::Ok, Self::Error> {
+ match self {
+ ControlFlow::Continue(y) => Ok(y),
+ ControlFlow::Break(x) => Err(x),
+ }
+ }
+ #[inline]
+ fn from_error(v: Self::Error) -> Self {
+ ControlFlow::Break(v)
+ }
+ #[inline]
+ fn from_ok(v: Self::Ok) -> Self {
+ ControlFlow::Continue(v)
+ }
+}
+
+impl<B, C> ControlFlow<B, C> {
+ /// Returns `true` if this is a `Break` variant.
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn is_break(&self) -> bool {
+ matches!(*self, ControlFlow::Break(_))
+ }
+
+ /// Returns `true` if this is a `Continue` variant.
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn is_continue(&self) -> bool {
+ matches!(*self, ControlFlow::Continue(_))
+ }
+
+ /// Converts the `ControlFlow` into an `Option` which is `Some` if the
+ /// `ControlFlow` was `Break` and `None` otherwise.
+ #[inline]
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ pub fn break_value(self) -> Option<B> {
+ match self {
+ ControlFlow::Continue(..) => None,
+ ControlFlow::Break(x) => Some(x),
+ }
+ }
+}
+
+impl<R: Try> ControlFlow<R, R::Ok> {
+ /// Create a `ControlFlow` from any type implementing `Try`.
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ #[inline]
+ pub fn from_try(r: R) -> Self {
+ match Try::into_result(r) {
+ Ok(v) => ControlFlow::Continue(v),
+ Err(v) => ControlFlow::Break(Try::from_error(v)),
+ }
+ }
+
+ /// Convert a `ControlFlow` into any type implementing `Try`;
+ #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+ #[inline]
+ pub fn into_try(self) -> R {
+ match self {
+ ControlFlow::Continue(v) => Try::from_ok(v),
+ ControlFlow::Break(v) => v,
+ }
+ }
+}
+
// Convenience constant available only when the continue payload is `()`.
impl<B> ControlFlow<B, ()> {
    /// It's frequently the case that there's no value needed with `Continue`,
    /// so this provides a way to avoid typing `(())`, if you prefer it.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(control_flow_enum)]
    /// use std::ops::ControlFlow;
    ///
    /// let mut partial_sum = 0;
    /// let last_used = (1..10).chain(20..25).try_for_each(|x| {
    ///     partial_sum += x;
    ///     if partial_sum > 100 { ControlFlow::Break(x) }
    ///     else { ControlFlow::CONTINUE }
    /// });
    /// assert_eq!(last_used.break_value(), Some(22));
    /// ```
    #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
    pub const CONTINUE: Self = ControlFlow::Continue(());
}
+
// Convenience constant available only when the break payload is `()`.
impl<C> ControlFlow<(), C> {
    /// APIs like `try_for_each` don't need values with `Break`,
    /// so this provides a way to avoid typing `(())`, if you prefer it.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(control_flow_enum)]
    /// use std::ops::ControlFlow;
    ///
    /// let mut partial_sum = 0;
    /// (1..10).chain(20..25).try_for_each(|x| {
    ///     if partial_sum > 100 { ControlFlow::BREAK }
    ///     else { partial_sum += x; ControlFlow::CONTINUE }
    /// });
    /// assert_eq!(partial_sum, 108);
    /// ```
    #[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
    pub const BREAK: Self = ControlFlow::Break(());
}
--- /dev/null
+/// Used for immutable dereferencing operations, like `*v`.
+///
+/// In addition to being used for explicit dereferencing operations with the
+/// (unary) `*` operator in immutable contexts, `Deref` is also used implicitly
+/// by the compiler in many circumstances. This mechanism is called
+/// ['`Deref` coercion'][more]. In mutable contexts, [`DerefMut`] is used.
+///
+/// Implementing `Deref` for smart pointers makes accessing the data behind them
+/// convenient, which is why they implement `Deref`. On the other hand, the
+/// rules regarding `Deref` and [`DerefMut`] were designed specifically to
+/// accommodate smart pointers. Because of this, **`Deref` should only be
+/// implemented for smart pointers** to avoid confusion.
+///
+/// For similar reasons, **this trait should never fail**. Failure during
+/// dereferencing can be extremely confusing when `Deref` is invoked implicitly.
+///
+/// # More on `Deref` coercion
+///
+/// If `T` implements `Deref<Target = U>`, and `x` is a value of type `T`, then:
+///
+/// * In immutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
+/// is equivalent to `*Deref::deref(&x)`.
+/// * Values of type `&T` are coerced to values of type `&U`
+/// * `T` implicitly implements all the (immutable) methods of the type `U`.
+///
+/// For more details, visit [the chapter in *The Rust Programming Language*][book]
+/// as well as the reference sections on [the dereference operator][ref-deref-op],
+/// [method resolution] and [type coercions].
+///
+/// [book]: ../../book/ch15-02-deref.html
+/// [more]: #more-on-deref-coercion
+/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
+/// [method resolution]: ../../reference/expressions/method-call-expr.html
+/// [type coercions]: ../../reference/type-coercions.html
+///
+/// # Examples
+///
+/// A struct with a single field which is accessible by dereferencing the
+/// struct.
+///
+/// ```
+/// use std::ops::Deref;
+///
+/// struct DerefExample<T> {
+/// value: T
+/// }
+///
+/// impl<T> Deref for DerefExample<T> {
+/// type Target = T;
+///
+/// fn deref(&self) -> &Self::Target {
+/// &self.value
+/// }
+/// }
+///
+/// let x = DerefExample { value: 'a' };
+/// assert_eq!('a', *x);
+/// ```
+#[lang = "deref"]
+#[doc(alias = "*")]
+#[doc(alias = "&*")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Deref {
+ /// The resulting type after dereferencing.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "deref_target"]
+ type Target: ?Sized;
+
+ /// Dereferences the value.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_diagnostic_item = "deref_method"]
+ fn deref(&self) -> &Self::Target;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for &T {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ *self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !DerefMut for &T {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for &mut T {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ *self
+ }
+}
+
+/// Used for mutable dereferencing operations, like in `*v = 1;`.
+///
+/// In addition to being used for explicit dereferencing operations with the
+/// (unary) `*` operator in mutable contexts, `DerefMut` is also used implicitly
+/// by the compiler in many circumstances. This mechanism is called
+/// ['`Deref` coercion'][more]. In immutable contexts, [`Deref`] is used.
+///
+/// Implementing `DerefMut` for smart pointers makes mutating the data behind
+/// them convenient, which is why they implement `DerefMut`. On the other hand,
+/// the rules regarding [`Deref`] and `DerefMut` were designed specifically to
+/// accommodate smart pointers. Because of this, **`DerefMut` should only be
+/// implemented for smart pointers** to avoid confusion.
+///
+/// For similar reasons, **this trait should never fail**. Failure during
+/// dereferencing can be extremely confusing when `DerefMut` is invoked
+/// implicitly.
+///
+/// # More on `Deref` coercion
+///
+/// If `T` implements `DerefMut<Target = U>`, and `x` is a value of type `T`,
+/// then:
+///
+/// * In mutable contexts, `*x` (where `T` is neither a reference nor a raw pointer)
+/// is equivalent to `*DerefMut::deref_mut(&mut x)`.
+/// * Values of type `&mut T` are coerced to values of type `&mut U`
+/// * `T` implicitly implements all the (mutable) methods of the type `U`.
+///
+/// For more details, visit [the chapter in *The Rust Programming Language*][book]
+/// as well as the reference sections on [the dereference operator][ref-deref-op],
+/// [method resolution] and [type coercions].
+///
+/// [book]: ../../book/ch15-02-deref.html
+/// [more]: #more-on-deref-coercion
+/// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator
+/// [method resolution]: ../../reference/expressions/method-call-expr.html
+/// [type coercions]: ../../reference/type-coercions.html
+///
+/// # Examples
+///
+/// A struct with a single field which is modifiable by dereferencing the
+/// struct.
+///
+/// ```
+/// use std::ops::{Deref, DerefMut};
+///
+/// struct DerefMutExample<T> {
+/// value: T
+/// }
+///
+/// impl<T> Deref for DerefMutExample<T> {
+/// type Target = T;
+///
+/// fn deref(&self) -> &Self::Target {
+/// &self.value
+/// }
+/// }
+///
+/// impl<T> DerefMut for DerefMutExample<T> {
+/// fn deref_mut(&mut self) -> &mut Self::Target {
+/// &mut self.value
+/// }
+/// }
+///
+/// let mut x = DerefMutExample { value: 'a' };
+/// *x = 'b';
+/// assert_eq!('b', *x);
+/// ```
+#[lang = "deref_mut"]
+#[doc(alias = "*")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait DerefMut: Deref {
+ /// Mutably dereferences the value.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn deref_mut(&mut self) -> &mut Self::Target;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for &mut T {
+ fn deref_mut(&mut self) -> &mut T {
+ *self
+ }
+}
+
+/// Indicates that a struct can be used as a method receiver, without the
+/// `arbitrary_self_types` feature. This is implemented by stdlib pointer types like `Box<T>`,
+/// `Rc<T>`, `&T`, and `Pin<P>`.
+#[lang = "receiver"]
+#[unstable(feature = "receiver_trait", issue = "none")]
+#[doc(hidden)]
+pub trait Receiver {
+ // Empty.
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for &T {}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for &mut T {}
--- /dev/null
+/// Custom code within the destructor.
+///
+/// When a value is no longer needed, Rust will run a "destructor" on that value.
+/// The most common way that a value is no longer needed is when it goes out of
+/// scope. Destructors may still run in other circumstances, but we're going to
+/// focus on scope for the examples here. To learn about some of those other cases,
+/// please see [the reference] section on destructors.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/destructors.html
+///
+/// This destructor consists of two components:
+/// - A call to `Drop::drop` for that value, if this special `Drop` trait is implemented for its type.
+/// - The automatically generated "drop glue" which recursively calls the destructors
+/// of all the fields of this value.
+///
+/// As Rust automatically calls the destructors of all contained fields,
+/// you don't have to implement `Drop` in most cases. But there are some cases where
+/// it is useful, for example for types which directly manage a resource.
+/// That resource may be memory, it may be a file descriptor, it may be a network socket.
+/// Once a value of that type is no longer going to be used, it should "clean up" its
+/// resource by freeing the memory or closing the file or socket. This is
+/// the job of a destructor, and therefore the job of `Drop::drop`.
+///
+/// ## Examples
+///
+/// To see destructors in action, let's take a look at the following program:
+///
+/// ```rust
+/// struct HasDrop;
+///
+/// impl Drop for HasDrop {
+/// fn drop(&mut self) {
+/// println!("Dropping HasDrop!");
+/// }
+/// }
+///
+/// struct HasTwoDrops {
+/// one: HasDrop,
+/// two: HasDrop,
+/// }
+///
+/// impl Drop for HasTwoDrops {
+/// fn drop(&mut self) {
+/// println!("Dropping HasTwoDrops!");
+/// }
+/// }
+///
+/// fn main() {
+/// let _x = HasTwoDrops { one: HasDrop, two: HasDrop };
+/// println!("Running!");
+/// }
+/// ```
+///
+/// Rust will first call `Drop::drop` for `_x` and then for both `_x.one` and `_x.two`,
+/// meaning that running this will print
+///
+/// ```text
+/// Running!
+/// Dropping HasTwoDrops!
+/// Dropping HasDrop!
+/// Dropping HasDrop!
+/// ```
+///
+/// Even if we remove the implementation of `Drop` for `HasTwoDrops`, the destructors of its fields are still called.
+/// This would result in
+///
+/// ```text
+/// Running!
+/// Dropping HasDrop!
+/// Dropping HasDrop!
+/// ```
+///
+/// ## You cannot call `Drop::drop` yourself
+///
+/// Because `Drop::drop` is used to clean up a value, it may be dangerous to use this value after
+/// the method has been called. As `Drop::drop` does not take ownership of its input,
+/// Rust prevents misuse by not allowing you to call `Drop::drop` directly.
+///
+/// In other words, if you tried to explicitly call `Drop::drop` in the above example, you'd get a compiler error.
+///
+/// If you'd like to explicitly call the destructor of a value, [`mem::drop`] can be used instead.
+///
+/// [`mem::drop`]: drop
+///
+/// ## Drop order
+///
+/// Which of our two `HasDrop` drops first, though? For structs, it's the same
+/// order that they're declared: first `one`, then `two`. If you'd like to try
+/// this yourself, you can modify `HasDrop` above to contain some data, like an
+/// integer, and then use it in the `println!` inside of `Drop`. This behavior is
+/// guaranteed by the language.
+///
+/// Unlike for structs, local variables are dropped in reverse order:
+///
+/// ```rust
+/// struct Foo;
+///
+/// impl Drop for Foo {
+/// fn drop(&mut self) {
+/// println!("Dropping Foo!")
+/// }
+/// }
+///
+/// struct Bar;
+///
+/// impl Drop for Bar {
+/// fn drop(&mut self) {
+/// println!("Dropping Bar!")
+/// }
+/// }
+///
+/// fn main() {
+/// let _foo = Foo;
+/// let _bar = Bar;
+/// }
+/// ```
+///
+/// This will print
+///
+/// ```text
+/// Dropping Bar!
+/// Dropping Foo!
+/// ```
+///
+/// Please see [the reference] for the full rules.
+///
+/// [the reference]: https://doc.rust-lang.org/reference/destructors.html
+///
+/// ## `Copy` and `Drop` are exclusive
+///
+/// You cannot implement both [`Copy`] and `Drop` on the same type. Types that
+/// are `Copy` get implicitly duplicated by the compiler, making it very
+/// hard to predict when, and how often destructors will be executed. As such,
+/// these types cannot have destructors.
+#[lang = "drop"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait Drop {
+ /// Executes the destructor for this type.
+ ///
+ /// This method is called implicitly when the value goes out of scope,
+ /// and cannot be called explicitly (this is compiler error [E0040]).
+ /// However, the [`mem::drop`] function in the prelude can be
+ /// used to call the argument's `Drop` implementation.
+ ///
+ /// When this method has been called, `self` has not yet been deallocated.
+ /// That only happens after the method is over.
+ /// If this wasn't the case, `self` would be a dangling reference.
+ ///
+ /// # Panics
+ ///
+ /// Given that a [`panic!`] will call `drop` as it unwinds, any [`panic!`]
+ /// in a `drop` implementation will likely abort.
+ ///
+ /// Note that even if this panics, the value is considered to be dropped;
+ /// you must not cause `drop` to be called again. This is normally automatically
+ /// handled by the compiler, but when using unsafe code, can sometimes occur
+ /// unintentionally, particularly when using [`ptr::drop_in_place`].
+ ///
+ /// [E0040]: ../../error-index.html#E0040
+ /// [`panic!`]: crate::panic!
+ /// [`mem::drop`]: drop
+ /// [`ptr::drop_in_place`]: crate::ptr::drop_in_place
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn drop(&mut self);
+}
--- /dev/null
+/// The version of the call operator that takes an immutable receiver.
+///
+/// Instances of `Fn` can be called repeatedly without mutating state.
+///
+/// *This trait (`Fn`) is not to be confused with [function pointers]
+/// (`fn`).*
+///
+/// `Fn` is implemented automatically by closures which only take immutable
+/// references to captured variables or don't capture anything at all, as well
+/// as (safe) [function pointers] (with some caveats, see their documentation
+/// for more details). Additionally, for any type `F` that implements `Fn`, `&F`
+/// implements `Fn`, too.
+///
+/// Since both [`FnMut`] and [`FnOnce`] are supertraits of `Fn`, any
+/// instance of `Fn` can be used as a parameter where a [`FnMut`] or [`FnOnce`]
+/// is expected.
+///
+/// Use `Fn` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly and without mutating state (e.g., when
+/// calling it concurrently). If you do not need such strict requirements, use
+/// [`FnMut`] or [`FnOnce`] as bounds.
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: ../../std/primitive.fn.html
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a closure
+///
+/// ```
+/// let square = |x| x * x;
+/// assert_eq!(square(5), 25);
+/// ```
+///
+/// ## Using a `Fn` parameter
+///
+/// ```
+/// fn call_with_one<F>(func: F) -> usize
+/// where F: Fn(usize) -> usize {
+/// func(1)
+/// }
+///
+/// let double = |x| x * 2;
+/// assert_eq!(call_with_one(double), 2);
+/// ```
+#[lang = "fn"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ message = "expected a `{Fn}<{Args}>` closure, found `{Self}`",
+ label = "expected an `Fn<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+pub trait Fn<Args>: FnMut<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output;
+}
+
+/// The version of the call operator that takes a mutable receiver.
+///
+/// Instances of `FnMut` can be called repeatedly and may mutate state.
+///
+/// `FnMut` is implemented automatically by closures which take mutable
+/// references to captured variables, as well as all types that implement
+/// [`Fn`], e.g., (safe) [function pointers] (since `FnMut` is a supertrait of
+/// [`Fn`]). Additionally, for any type `F` that implements `FnMut`, `&mut F`
+/// implements `FnMut`, too.
+///
+/// Since [`FnOnce`] is a supertrait of `FnMut`, any instance of `FnMut` can be
+/// used where a [`FnOnce`] is expected, and since [`Fn`] is a subtrait of
+/// `FnMut`, any instance of [`Fn`] can be used where `FnMut` is expected.
+///
+/// Use `FnMut` as a bound when you want to accept a parameter of function-like
+/// type and need to call it repeatedly, while allowing it to mutate state.
+/// If you don't want the parameter to mutate state, use [`Fn`] as a
+/// bound; if you don't need to call it repeatedly, use [`FnOnce`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: ../../std/primitive.fn.html
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Calling a mutably capturing closure
+///
+/// ```
+/// let mut x = 5;
+/// {
+/// let mut square_x = || x *= x;
+/// square_x();
+/// }
+/// assert_eq!(x, 25);
+/// ```
+///
+/// ## Using a `FnMut` parameter
+///
+/// ```
+/// fn do_twice<F>(mut func: F)
+/// where F: FnMut()
+/// {
+/// func();
+/// func();
+/// }
+///
+/// let mut x: usize = 1;
+/// {
+/// let add_two_to_x = || x += 2;
+/// do_twice(add_two_to_x);
+/// }
+///
+/// assert_eq!(x, 5);
+/// ```
+#[lang = "fn_mut"]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_paren_sugar]
+#[rustc_on_unimplemented(
+ on(
+ Args = "()",
+ note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
+ ),
+ message = "expected a `{FnMut}<{Args}>` closure, found `{Self}`",
+ label = "expected an `FnMut<{Args}>` closure, found `{Self}`"
+)]
+#[fundamental] // so that regex can rely that `&str: !FnMut`
+#[must_use = "closures are lazy and do nothing unless called"]
+pub trait FnMut<Args>: FnOnce<Args> {
+ /// Performs the call operation.
+ #[unstable(feature = "fn_traits", issue = "29625")]
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+/// The version of the call operator that takes a by-value receiver.
+///
+/// Instances of `FnOnce` can be called, but might not be callable multiple
+/// times. Because of this, if the only thing known about a type is that it
+/// implements `FnOnce`, it can only be called once.
+///
+/// `FnOnce` is implemented automatically by closures that might consume captured
+/// variables, as well as all types that implement [`FnMut`], e.g., (safe)
+/// [function pointers] (since `FnOnce` is a supertrait of [`FnMut`]).
+///
+/// Since both [`Fn`] and [`FnMut`] are subtraits of `FnOnce`, any instance of
+/// [`Fn`] or [`FnMut`] can be used where a `FnOnce` is expected.
+///
+/// Use `FnOnce` as a bound when you want to accept a parameter of function-like
+/// type and only need to call it once. If you need to call the parameter
+/// repeatedly, use [`FnMut`] as a bound; if you also need it to not mutate
+/// state, use [`Fn`].
+///
+/// See the [chapter on closures in *The Rust Programming Language*][book] for
+/// some more information on this topic.
+///
+/// Also of note is the special syntax for `Fn` traits (e.g.
+/// `Fn(usize, bool) -> usize`). Those interested in the technical details of
+/// this can refer to [the relevant section in the *Rustonomicon*][nomicon].
+///
+/// [book]: ../../book/ch13-01-closures.html
+/// [function pointers]: ../../std/primitive.fn.html
+/// [nomicon]: ../../nomicon/hrtb.html
+///
+/// # Examples
+///
+/// ## Using a `FnOnce` parameter
+///
+/// ```
+/// fn consume_with_relish<F>(func: F)
+/// where F: FnOnce() -> String
+/// {
+/// // `func` consumes its captured variables, so it cannot be run more
+/// // than once.
+/// println!("Consumed: {}", func());
+///
+/// println!("Delicious!");
+///
+/// // Attempting to invoke `func()` again will throw a `use of moved
+/// // value` error for `func`.
+/// }
+///
+/// let x = String::from("x");
+/// let consume_and_return_x = move || x;
+/// consume_with_relish(consume_and_return_x);
+///
+/// // `consume_and_return_x` can no longer be invoked at this point
+/// ```
#[lang = "fn_once"] // compiler lang item: the base trait of all callable types
#[stable(feature = "rust1", since = "1.0.0")]
// Permits the parenthesized sugar `FnOnce(A) -> B` in trait bounds.
#[rustc_paren_sugar]
#[rustc_on_unimplemented(
    on(
        Args = "()",
        note = "wrap the `{Self}` in a closure with no arguments: `|| {{ /* code */ }}`"
    ),
    message = "expected a `{FnOnce}<{Args}>` closure, found `{Self}`",
    label = "expected an `FnOnce<{Args}>` closure, found `{Self}`"
)]
#[fundamental] // so that regex can rely that `&str: !FnOnce`
#[must_use = "closures are lazy and do nothing unless called"]
pub trait FnOnce<Args> {
    /// The returned type after the call operator is used.
    #[lang = "fn_once_output"]
    #[stable(feature = "fn_once_output", since = "1.12.0")]
    type Output;

    /// Performs the call operation.
    ///
    /// Takes `self` by value, consuming the callee — hence a value known
    /// only to be `FnOnce` can be called at most once.
    #[unstable(feature = "fn_traits", issue = "29625")]
    extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
}
+
mod impls {
    // Forwarding impls: references to callables are themselves callable.
    //
    // `&F` implements all three call traits when `F: Fn<A>`, because calling
    // through a shared reference never needs to mutate or consume `F`.

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A, F: ?Sized> Fn<A> for &F
    where
        F: Fn<A>,
    {
        extern "rust-call" fn call(&self, args: A) -> F::Output {
            (**self).call(args)
        }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A, F: ?Sized> FnMut<A> for &F
    where
        F: Fn<A>,
    {
        // Note the bound is still `F: Fn<A>`: "mutably" calling through `&F`
        // never actually mutates the referent, so it delegates to `call`.
        extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
            (**self).call(args)
        }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A, F: ?Sized> FnOnce<A> for &F
    where
        F: Fn<A>,
    {
        type Output = F::Output;

        // Consuming the reference itself is trivial; the call still goes
        // through `Fn::call` on the referent.
        extern "rust-call" fn call_once(self, args: A) -> F::Output {
            (*self).call(args)
        }
    }

    // `&mut F` forwards `FnMut` and `FnOnce` (but not `Fn`): the unique
    // reference allows mutating the referent, so `F: FnMut<A>` suffices.

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A, F: ?Sized> FnMut<A> for &mut F
    where
        F: FnMut<A>,
    {
        extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
            (*self).call_mut(args)
        }
    }

    #[stable(feature = "rust1", since = "1.0.0")]
    impl<A, F: ?Sized> FnOnce<A> for &mut F
    where
        F: FnMut<A>,
    {
        type Output = F::Output;
        extern "rust-call" fn call_once(self, args: A) -> F::Output {
            (*self).call_mut(args)
        }
    }
}
--- /dev/null
+use crate::marker::Unpin;
+use crate::pin::Pin;
+
+/// The result of a generator resumption.
+///
+/// This enum is returned from the `Generator::resume` method and indicates the
+/// possible return values of a generator. Currently this corresponds to either
+/// a suspension point (`Yielded`) or a termination point (`Complete`).
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[lang = "generator_state"] // compiler lang item: `Generator::resume` returns this enum
#[unstable(feature = "generator_trait", issue = "43122")]
pub enum GeneratorState<Y, R> {
    /// The generator suspended with a value.
    ///
    /// This state indicates that a generator has been suspended, and typically
    /// corresponds to a `yield` statement. The value provided in this variant
    /// corresponds to the expression passed to `yield` and allows generators to
    /// provide a value each time they yield.
    Yielded(Y),

    /// The generator completed with a return value.
    ///
    /// This state indicates that a generator has finished execution with the
    /// provided value. Once a generator has returned `Complete` it is
    /// considered a programmer error to call `resume` again.
    Complete(R),
}
+
+/// The trait implemented by builtin generator types.
+///
+/// Generators, also commonly referred to as coroutines, are currently an
/// experimental language feature in Rust. Added in [RFC 2033], generators are
+/// currently intended to primarily provide a building block for async/await
+/// syntax but will likely extend to also providing an ergonomic definition for
+/// iterators and other primitives.
+///
/// The syntax and semantics for generators are unstable and will require a
+/// further RFC for stabilization. At this time, though, the syntax is
+/// closure-like:
+///
+/// ```rust
+/// #![feature(generators, generator_trait)]
+///
+/// use std::ops::{Generator, GeneratorState};
+/// use std::pin::Pin;
+///
+/// fn main() {
+/// let mut generator = || {
+/// yield 1;
+/// return "foo"
+/// };
+///
+/// match Pin::new(&mut generator).resume(()) {
+/// GeneratorState::Yielded(1) => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// match Pin::new(&mut generator).resume(()) {
+/// GeneratorState::Complete("foo") => {}
+/// _ => panic!("unexpected return from resume"),
+/// }
+/// }
+/// ```
+///
+/// More documentation of generators can be found in the unstable book.
+///
+/// [RFC 2033]: https://github.com/rust-lang/rfcs/pull/2033
#[lang = "generator"] // compiler lang item: generator literals implement this trait
#[unstable(feature = "generator_trait", issue = "43122")]
#[fundamental]
// `R` is the resume-argument type, defaulting to `()` (resume without a value).
pub trait Generator<R = ()> {
    /// The type of value this generator yields.
    ///
    /// This associated type corresponds to the `yield` expression and the
    /// values which are allowed to be returned each time a generator yields.
    /// For example an iterator-as-a-generator would likely have this type as
    /// `T`, the type being iterated over.
    type Yield;

    /// The type of value this generator returns.
    ///
    /// This corresponds to the type returned from a generator either with a
    /// `return` statement or implicitly as the last expression of a generator
    /// literal. For example futures would use this as `Result<T, E>` as it
    /// represents a completed future.
    type Return;

    /// Resumes the execution of this generator.
    ///
    /// This function will resume execution of the generator or start execution
    /// if it hasn't already. This call will return back into the generator's
    /// last suspension point, resuming execution from the latest `yield`. The
    /// generator will continue executing until it either yields or returns, at
    /// which point this function will return.
    ///
    /// # Return value
    ///
    /// The `GeneratorState` enum returned from this function indicates what
    /// state the generator is in upon returning. If the `Yielded` variant is
    /// returned then the generator has reached a suspension point and a value
    /// has been yielded out. Generators in this state are available for
    /// resumption at a later point.
    ///
    /// If `Complete` is returned then the generator has completely finished
    /// with the value provided. It is invalid for the generator to be resumed
    /// again.
    ///
    /// # Panics
    ///
    /// This function may panic if it is called after the `Complete` variant has
    /// been returned previously. While generator literals in the language are
    /// guaranteed to panic on resuming after `Complete`, this is not guaranteed
    /// for all implementations of the `Generator` trait.
    // NOTE: the `Pin<&mut Self>` receiver guarantees the generator cannot be
    // moved between resumptions (its suspended state may be address-sensitive).
    fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return>;
}
+
#[unstable(feature = "generator_trait", issue = "43122")]
// Forward `Generator` through a pinned mutable reference: `(*self)` reaches
// the inner `Pin<&mut G>`, and `as_mut()` re-borrows it as the receiver that
// `G::resume` expects.
impl<G: ?Sized + Generator<R>, R> Generator<R> for Pin<&mut G> {
    type Yield = G::Yield;
    type Return = G::Return;

    fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
        G::resume((*self).as_mut(), arg)
    }
}
+
#[unstable(feature = "generator_trait", issue = "43122")]
// A plain `&mut G` is a generator when `G: Unpin`: that bound is exactly what
// makes the `Pin::new` below available, since `G` does not care about moves.
impl<G: ?Sized + Generator<R> + Unpin, R> Generator<R> for &mut G {
    type Yield = G::Yield;
    type Return = G::Return;

    fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
        G::resume(Pin::new(&mut *self), arg)
    }
}
--- /dev/null
+/// Used for indexing operations (`container[index]`) in immutable contexts.
+///
+/// `container[index]` is actually syntactic sugar for `*container.index(index)`,
+/// but only when used as an immutable value. If a mutable value is requested,
+/// [`IndexMut`] is used instead. This allows nice things such as
+/// `let value = v[index]` if the type of `value` implements [`Copy`].
+///
+/// # Examples
+///
+/// The following example implements `Index` on a read-only `NucleotideCount`
+/// container, enabling individual counts to be retrieved with index syntax.
+///
+/// ```
+/// use std::ops::Index;
+///
+/// enum Nucleotide {
+/// A,
+/// C,
+/// G,
+/// T,
+/// }
+///
+/// struct NucleotideCount {
+/// a: usize,
+/// c: usize,
+/// g: usize,
+/// t: usize,
+/// }
+///
+/// impl Index<Nucleotide> for NucleotideCount {
+/// type Output = usize;
+///
+/// fn index(&self, nucleotide: Nucleotide) -> &Self::Output {
+/// match nucleotide {
+/// Nucleotide::A => &self.a,
+/// Nucleotide::C => &self.c,
+/// Nucleotide::G => &self.g,
+/// Nucleotide::T => &self.t,
+/// }
+/// }
+/// }
+///
+/// let nucleotide_count = NucleotideCount {a: 14, c: 9, g: 10, t: 12};
+/// assert_eq!(nucleotide_count[Nucleotide::A], 14);
+/// assert_eq!(nucleotide_count[Nucleotide::C], 9);
+/// assert_eq!(nucleotide_count[Nucleotide::G], 10);
+/// assert_eq!(nucleotide_count[Nucleotide::T], 12);
+/// ```
#[lang = "index"] // compiler lang item: backs `container[index]` in immutable contexts
#[rustc_on_unimplemented(
    message = "the type `{Self}` cannot be indexed by `{Idx}`",
    label = "`{Self}` cannot be indexed by `{Idx}`"
)]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "]")]
#[doc(alias = "[")]
#[doc(alias = "[]")]
pub trait Index<Idx: ?Sized> {
    /// The returned type after indexing.
    #[stable(feature = "rust1", since = "1.0.0")]
    type Output: ?Sized;

    /// Performs the indexing (`container[index]`) operation.
    ///
    /// Implementations may panic on an out-of-range index; `#[track_caller]`
    /// attributes such a panic to the indexing call site rather than to the
    /// implementation.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[track_caller]
    fn index(&self, index: Idx) -> &Self::Output;
}
+
+/// Used for indexing operations (`container[index]`) in mutable contexts.
+///
+/// `container[index]` is actually syntactic sugar for
+/// `*container.index_mut(index)`, but only when used as a mutable value. If
+/// an immutable value is requested, the [`Index`] trait is used instead. This
+/// allows nice things such as `v[index] = value`.
+///
+/// # Examples
+///
+/// A very simple implementation of a `Balance` struct that has two sides, where
+/// each can be indexed mutably and immutably.
+///
+/// ```
+/// use std::ops::{Index,IndexMut};
+///
+/// #[derive(Debug)]
+/// enum Side {
+/// Left,
+/// Right,
+/// }
+///
+/// #[derive(Debug, PartialEq)]
+/// enum Weight {
+/// Kilogram(f32),
+/// Pound(f32),
+/// }
+///
+/// struct Balance {
+/// pub left: Weight,
+/// pub right: Weight,
+/// }
+///
+/// impl Index<Side> for Balance {
+/// type Output = Weight;
+///
+/// fn index(&self, index: Side) -> &Self::Output {
+/// println!("Accessing {:?}-side of balance immutably", index);
+/// match index {
+/// Side::Left => &self.left,
+/// Side::Right => &self.right,
+/// }
+/// }
+/// }
+///
+/// impl IndexMut<Side> for Balance {
+/// fn index_mut(&mut self, index: Side) -> &mut Self::Output {
+/// println!("Accessing {:?}-side of balance mutably", index);
+/// match index {
+/// Side::Left => &mut self.left,
+/// Side::Right => &mut self.right,
+/// }
+/// }
+/// }
+///
+/// let mut balance = Balance {
+/// right: Weight::Kilogram(2.5),
+/// left: Weight::Pound(1.5),
+/// };
+///
+/// // In this case, `balance[Side::Right]` is sugar for
+/// // `*balance.index(Side::Right)`, since we are only *reading*
+/// // `balance[Side::Right]`, not writing it.
+/// assert_eq!(balance[Side::Right], Weight::Kilogram(2.5));
+///
+/// // However, in this case `balance[Side::Left]` is sugar for
+/// // `*balance.index_mut(Side::Left)`, since we are writing
+/// // `balance[Side::Left]`.
+/// balance[Side::Left] = Weight::Kilogram(3.0);
+/// ```
#[lang = "index_mut"] // compiler lang item: backs `container[index]` in mutable contexts
#[rustc_on_unimplemented(
    on(
        _Self = "&str",
        note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
    ),
    on(
        _Self = "str",
        note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
    ),
    on(
        _Self = "std::string::String",
        note = "you can use `.chars().nth()` or `.bytes().nth()`
see chapter in The Book <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
    ),
    message = "the type `{Self}` cannot be mutably indexed by `{Idx}`",
    label = "`{Self}` cannot be mutably indexed by `{Idx}`"
)]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(alias = "[")]
#[doc(alias = "]")]
#[doc(alias = "[]")]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
    /// Performs the mutable indexing (`container[index]`) operation.
    ///
    /// Implementations may panic on an out-of-range index; `#[track_caller]`
    /// attributes such a panic to the indexing call site.
    #[stable(feature = "rust1", since = "1.0.0")]
    #[track_caller]
    fn index_mut(&mut self, index: Idx) -> &mut Self::Output;
}
--- /dev/null
+//! Overloadable operators.
+//!
+//! Implementing these traits allows you to overload certain operators.
+//!
+//! Some of these traits are imported by the prelude, so they are available in
+//! every Rust program. Only operators backed by traits can be overloaded. For
+//! example, the addition operator (`+`) can be overloaded through the [`Add`]
+//! trait, but since the assignment operator (`=`) has no backing trait, there
+//! is no way of overloading its semantics. Additionally, this module does not
+//! provide any mechanism to create new operators. If traitless overloading or
+//! custom operators are required, you should look toward macros or compiler
+//! plugins to extend Rust's syntax.
+//!
+//! Implementations of operator traits should be unsurprising in their
+//! respective contexts, keeping in mind their usual meanings and
+//! [operator precedence]. For example, when implementing [`Mul`], the operation
+//! should have some resemblance to multiplication (and share expected
+//! properties like associativity).
+//!
+//! Note that the `&&` and `||` operators short-circuit, i.e., they only
+//! evaluate their second operand if it contributes to the result. Since this
+//! behavior is not enforceable by traits, `&&` and `||` are not supported as
+//! overloadable operators.
+//!
+//! Many of the operators take their operands by value. In non-generic
+//! contexts involving built-in types, this is usually not a problem.
//! However, using these operators in generic code requires some
+//! attention if values have to be reused as opposed to letting the operators
+//! consume them. One option is to occasionally use [`clone`].
+//! Another option is to rely on the types involved providing additional
+//! operator implementations for references. For example, for a user-defined
+//! type `T` which is supposed to support addition, it is probably a good
+//! idea to have both `T` and `&T` implement the traits [`Add<T>`][`Add`] and
+//! [`Add<&T>`][`Add`] so that generic code can be written without unnecessary
+//! cloning.
+//!
+//! # Examples
+//!
+//! This example creates a `Point` struct that implements [`Add`] and [`Sub`],
+//! and then demonstrates adding and subtracting two `Point`s.
+//!
+//! ```rust
+//! use std::ops::{Add, Sub};
+//!
+//! #[derive(Debug, Copy, Clone, PartialEq)]
+//! struct Point {
+//! x: i32,
+//! y: i32,
+//! }
+//!
+//! impl Add for Point {
+//! type Output = Self;
+//!
+//! fn add(self, other: Self) -> Self {
+//! Self {x: self.x + other.x, y: self.y + other.y}
+//! }
+//! }
+//!
+//! impl Sub for Point {
+//! type Output = Self;
+//!
+//! fn sub(self, other: Self) -> Self {
+//! Self {x: self.x - other.x, y: self.y - other.y}
+//! }
+//! }
+//!
+//! assert_eq!(Point {x: 3, y: 3}, Point {x: 1, y: 0} + Point {x: 2, y: 3});
+//! assert_eq!(Point {x: -1, y: -3}, Point {x: 1, y: 0} - Point {x: 2, y: 3});
+//! ```
+//!
+//! See the documentation for each trait for an example implementation.
+//!
+//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be
+//! invoked like functions. Note that [`Fn`] takes `&self`, [`FnMut`] takes `&mut
+//! self` and [`FnOnce`] takes `self`. These correspond to the three kinds of
+//! methods that can be invoked on an instance: call-by-reference,
+//! call-by-mutable-reference, and call-by-value. The most common use of these
+//! traits is to act as bounds to higher-level functions that take functions or
+//! closures as arguments.
+//!
+//! Taking a [`Fn`] as a parameter:
+//!
+//! ```rust
+//! fn call_with_one<F>(func: F) -> usize
+//! where F: Fn(usize) -> usize
+//! {
+//! func(1)
+//! }
+//!
+//! let double = |x| x * 2;
+//! assert_eq!(call_with_one(double), 2);
+//! ```
+//!
+//! Taking a [`FnMut`] as a parameter:
+//!
+//! ```rust
+//! fn do_twice<F>(mut func: F)
+//! where F: FnMut()
+//! {
+//! func();
+//! func();
+//! }
+//!
+//! let mut x: usize = 1;
+//! {
+//! let add_two_to_x = || x += 2;
+//! do_twice(add_two_to_x);
+//! }
+//!
+//! assert_eq!(x, 5);
+//! ```
+//!
+//! Taking a [`FnOnce`] as a parameter:
+//!
+//! ```rust
+//! fn consume_with_relish<F>(func: F)
+//! where F: FnOnce() -> String
+//! {
+//! // `func` consumes its captured variables, so it cannot be run more
+//! // than once
+//! println!("Consumed: {}", func());
+//!
+//! println!("Delicious!");
+//!
+//! // Attempting to invoke `func()` again will throw a `use of moved
+//! // value` error for `func`
+//! }
+//!
+//! let x = String::from("x");
+//! let consume_and_return_x = move || x;
+//! consume_with_relish(consume_and_return_x);
+//!
+//! // `consume_and_return_x` can no longer be invoked at this point
+//! ```
+//!
+//! [`clone`]: Clone::clone
+//! [operator precedence]: ../../reference/expressions.html#expression-precedence
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod arith;
+mod bit;
+mod control_flow;
+mod deref;
+mod drop;
+mod function;
+mod generator;
+mod index;
+mod range;
+mod r#try;
+mod unsize;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::arith::{Add, Div, Mul, Neg, Rem, Sub};
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+pub use self::arith::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::bit::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
+#[stable(feature = "op_assign_traits", since = "1.8.0")]
+pub use self::bit::{BitAndAssign, BitOrAssign, BitXorAssign, ShlAssign, ShrAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::deref::{Deref, DerefMut};
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+pub use self::deref::Receiver;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::drop::Drop;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::function::{Fn, FnMut, FnOnce};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::index::{Index, IndexMut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::range::{Range, RangeFrom, RangeFull, RangeTo};
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+pub use self::range::{Bound, RangeBounds, RangeInclusive, RangeToInclusive};
+
+#[unstable(feature = "try_trait", issue = "42327")]
+pub use self::r#try::Try;
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+pub use self::generator::{Generator, GeneratorState};
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+pub use self::unsize::CoerceUnsized;
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+pub use self::unsize::DispatchFromDyn;
+
+#[unstable(feature = "control_flow_enum", reason = "new API", issue = "75744")]
+pub use self::control_flow::ControlFlow;
--- /dev/null
+use crate::fmt;
+use crate::hash::Hash;
+use crate::slice::index::{
+ slice_end_index_len_fail, slice_end_index_overflow_fail, slice_index_order_fail,
+ slice_start_index_overflow_fail,
+};
+
+/// An unbounded range (`..`).
+///
+/// `RangeFull` is primarily used as a [slicing index], its shorthand is `..`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..` syntax is a `RangeFull`:
+///
+/// ```
+/// assert_eq!((..), std::ops::RangeFull);
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in
+/// a `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// for i in .. {
+/// // ...
+/// }
+/// ```
+///
+/// Used as a [slicing index], `RangeFull` produces the full array as a slice.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]); // This is the `RangeFull`
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
#[lang = "RangeFull"] // compiler lang item: the literal `..` has this type
#[doc(alias = "..")]
#[derive(Copy, Clone, Default, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeFull; // unit struct: an unbounded range carries no data
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for RangeFull {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "..")
+ }
+}
+
+/// A (half-open) range bounded inclusively below and exclusively above
+/// (`start..end`).
+///
+/// The range `start..end` contains all values with `start <= x < end`.
+/// It is empty if `start >= end`.
+///
+/// # Examples
+///
+/// The `start..end` syntax is a `Range`:
+///
+/// ```
+/// assert_eq!((3..5), std::ops::Range { start: 3, end: 5 });
+/// assert_eq!(3 + 4 + 5, (3..6).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]); // This is a `Range`
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
#[lang = "Range"] // compiler lang item: `start..end` literals have this type
#[doc(alias = "..")]
// not Copy -- see #27186 (presumably because `Range` is also an iterator, and
// an implicitly copied iterator is easy to misuse -- see the issue for details)
#[derive(Clone, Default, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Range<Idx> {
    /// The lower bound of the range (inclusive).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub start: Idx,
    /// The upper bound of the range (exclusive).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub end: Idx,
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for Range<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.start.fmt(fmt)?;
+ write!(fmt, "..")?;
+ self.end.fmt(fmt)?;
+ Ok(())
+ }
+}
+
impl<Idx: PartialOrd<Idx>> Range<Idx> {
    /// Returns `true` if `item` is contained in the range.
    ///
    /// # Examples
    ///
    /// ```
    /// assert!(!(3..5).contains(&2));
    /// assert!( (3..5).contains(&3));
    /// assert!( (3..5).contains(&4));
    /// assert!(!(3..5).contains(&5));
    ///
    /// assert!(!(3..3).contains(&3));
    /// assert!(!(3..2).contains(&3));
    ///
    /// assert!( (0.0..1.0).contains(&0.5));
    /// assert!(!(0.0..1.0).contains(&f32::NAN));
    /// assert!(!(0.0..f32::NAN).contains(&0.5));
    /// assert!(!(f32::NAN..1.0).contains(&0.5));
    /// ```
    #[stable(feature = "range_contains", since = "1.35.0")]
    pub fn contains<U>(&self, item: &U) -> bool
    where
        Idx: PartialOrd<U>,
        U: ?Sized + PartialOrd<Idx>,
    {
        // Delegate to the shared `RangeBounds` logic so containment behaves
        // identically across all range types.
        <Self as RangeBounds<Idx>>::contains(self, item)
    }

    /// Returns `true` if the range contains no items.
    ///
    /// # Examples
    ///
    /// ```
    /// assert!(!(3..5).is_empty());
    /// assert!( (3..3).is_empty());
    /// assert!( (3..2).is_empty());
    /// ```
    ///
    /// The range is empty if either side is incomparable:
    ///
    /// ```
    /// assert!(!(3.0..5.0).is_empty());
    /// assert!( (3.0..f32::NAN).is_empty());
    /// assert!( (f32::NAN..5.0).is_empty());
    /// ```
    #[stable(feature = "range_is_empty", since = "1.47.0")]
    pub fn is_empty(&self) -> bool {
        // Deliberately `!(start < end)` rather than `start >= end`: for a
        // partial order (e.g. floats involving NaN) an incomparable pair must
        // make the range empty, as the doctests above require.
        !(self.start < self.end)
    }
}
+
+/// A range only bounded inclusively below (`start..`).
+///
+/// The `RangeFrom` `start..` contains all values with `x >= start`.
+///
+/// *Note*: Overflow in the [`Iterator`] implementation (when the contained
+/// data type reaches its numerical limit) is allowed to panic, wrap, or
+/// saturate. This behavior is defined by the implementation of the [`Step`]
+/// trait. For primitive integers, this follows the normal rules, and respects
+/// the overflow checks profile (panic in debug, wrap in release). Note also
+/// that overflow happens earlier than you might assume: the overflow happens
+/// in the call to `next` that yields the maximum value, as the range must be
+/// set to a state to yield the next value.
+///
+/// [`Step`]: crate::iter::Step
+///
+/// # Examples
+///
+/// The `start..` syntax is a `RangeFrom`:
+///
+/// ```
+/// assert_eq!((2..), std::ops::RangeFrom { start: 2 });
+/// assert_eq!(2 + 3 + 4, (2..).take(3).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]); // This is a `RangeFrom`
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
#[lang = "RangeFrom"] // compiler lang item: `start..` literals have this type
#[doc(alias = "..")]
#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeFrom<Idx> {
    /// The lower bound of the range (inclusive).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub start: Idx,
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeFrom<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.start.fmt(fmt)?;
+ write!(fmt, "..")?;
+ Ok(())
+ }
+}
+
impl<Idx: PartialOrd<Idx>> RangeFrom<Idx> {
    /// Returns `true` if `item` is contained in the range.
    ///
    /// # Examples
    ///
    /// ```
    /// assert!(!(3..).contains(&2));
    /// assert!( (3..).contains(&3));
    /// assert!( (3..).contains(&1_000_000_000));
    ///
    /// assert!( (0.0..).contains(&0.5));
    /// assert!(!(0.0..).contains(&f32::NAN));
    /// assert!(!(f32::NAN..).contains(&0.5));
    /// ```
    #[stable(feature = "range_contains", since = "1.35.0")]
    pub fn contains<U>(&self, item: &U) -> bool
    where
        Idx: PartialOrd<U>,
        U: ?Sized + PartialOrd<Idx>,
    {
        // Delegate to the shared `RangeBounds` logic; note the NaN doctests
        // above -- comparisons against NaN are `false`, so NaN is never
        // contained and a NaN bound contains nothing.
        <Self as RangeBounds<Idx>>::contains(self, item)
    }
}
+
+/// A range only bounded exclusively above (`..end`).
+///
+/// The `RangeTo` `..end` contains all values with `x < end`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..end` syntax is a `RangeTo`:
+///
+/// ```
+/// assert_eq!((..5), std::ops::RangeTo { end: 5 });
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in
+/// a `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// // error[E0277]: the trait bound `std::ops::RangeTo<{integer}>:
+/// // std::iter::Iterator` is not satisfied
+/// for i in ..5 {
+/// // ...
+/// }
+/// ```
+///
+/// When used as a [slicing index], `RangeTo` produces a slice of all array
+/// elements before the index indicated by `end`.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]); // This is a `RangeTo`
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
#[lang = "RangeTo"] // compiler lang item: `..end` literals have this type
#[doc(alias = "..")]
// `Copy` is fine here (contrast `Range`): `RangeTo` cannot serve as an
// `Iterator`, so an implicit copy cannot silently fork iteration state.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct RangeTo<Idx> {
    /// The upper bound of the range (exclusive).
    #[stable(feature = "rust1", since = "1.0.0")]
    pub end: Idx,
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeTo<Idx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "..")?;
+ self.end.fmt(fmt)?;
+ Ok(())
+ }
+}
+
impl<Idx: PartialOrd<Idx>> RangeTo<Idx> {
    /// Returns `true` if `item` is contained in the range.
    ///
    /// # Examples
    ///
    /// ```
    /// assert!( (..5).contains(&-1_000_000_000));
    /// assert!( (..5).contains(&4));
    /// assert!(!(..5).contains(&5));
    ///
    /// assert!( (..1.0).contains(&0.5));
    /// assert!(!(..1.0).contains(&f32::NAN));
    /// assert!(!(..f32::NAN).contains(&0.5));
    /// ```
    #[stable(feature = "range_contains", since = "1.35.0")]
    pub fn contains<U>(&self, item: &U) -> bool
    where
        Idx: PartialOrd<U>,
        U: ?Sized + PartialOrd<Idx>,
    {
        // Delegate to the shared `RangeBounds` logic so containment behaves
        // identically across all range types.
        <Self as RangeBounds<Idx>>::contains(self, item)
    }
}
+
+/// A range bounded inclusively below and above (`start..=end`).
+///
+/// The `RangeInclusive` `start..=end` contains all values with `x >= start`
+/// and `x <= end`. It is empty unless `start <= end`.
+///
+/// This iterator is [fused], but the specific values of `start` and `end` after
+/// iteration has finished are **unspecified** other than that [`.is_empty()`]
+/// will return `true` once no more values will be produced.
+///
+/// [fused]: crate::iter::FusedIterator
+/// [`.is_empty()`]: RangeInclusive::is_empty
+///
+/// # Examples
+///
+/// The `start..=end` syntax is a `RangeInclusive`:
+///
+/// ```
+/// assert_eq!((3..=5), std::ops::RangeInclusive::new(3, 5));
+/// assert_eq!(3 + 4 + 5, (3..=5).sum());
+/// ```
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]);
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]); // This is a `RangeInclusive`
+/// ```
#[lang = "RangeInclusive"] // compiler lang item: `start..=end` literals have this type
#[doc(alias = "..=")]
#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186
#[stable(feature = "inclusive_range", since = "1.26.0")]
pub struct RangeInclusive<Idx> {
    // Note that the fields here are not public to allow changing the
    // representation in the future; in particular, while we could plausibly
    // expose start/end, modifying them without changing (future/current)
    // private fields may lead to incorrect behavior, so we don't want to
    // support that mode.
    pub(crate) start: Idx,
    pub(crate) end: Idx,

    // This field is:
    // - `false` upon construction
    // - `false` when iteration has yielded an element and the iterator is not exhausted
    // - `true` when iteration has been used to exhaust the iterator
    //
    // This is required to support PartialEq and Hash without a PartialOrd bound or specialization.
    // (Otherwise an exhausted range could not be told apart from a fresh one
    // with the same bounds when comparing or hashing.)
    pub(crate) exhausted: bool,
}
+
+impl<Idx> RangeInclusive<Idx> {
+ /// Creates a new inclusive range. Equivalent to writing `start..=end`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ops::RangeInclusive;
+ ///
+ /// assert_eq!(3..=5, RangeInclusive::new(3, 5));
+ /// ```
    #[lang = "range_inclusive_new"] // compiler lang item: `start..=end` literals lower to this constructor
    #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
    #[inline]
    #[rustc_promotable]
    #[rustc_const_stable(feature = "const_range_new", since = "1.32.0")]
    pub const fn new(start: Idx, end: Idx) -> Self {
        // A freshly constructed range has not yielded anything yet.
        Self { start, end, exhausted: false }
    }
+
+ /// Returns the lower bound of the range (inclusive).
+ ///
+ /// When using an inclusive range for iteration, the values of `start()` and
+ /// [`end()`] are unspecified after the iteration ended. To determine
+ /// whether the inclusive range is empty, use the [`is_empty()`] method
+ /// instead of comparing `start() > end()`.
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// [`end()`]: RangeInclusive::end
+ /// [`is_empty()`]: RangeInclusive::is_empty
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).start(), &3);
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_inclusive_range_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn start(&self) -> &Idx {
+ &self.start
+ }
+
+ /// Returns the upper bound of the range (inclusive).
+ ///
+ /// When using an inclusive range for iteration, the values of [`start()`]
+ /// and `end()` are unspecified after the iteration ended. To determine
+ /// whether the inclusive range is empty, use the [`is_empty()`] method
+ /// instead of comparing `start() > end()`.
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// [`start()`]: RangeInclusive::start
+ /// [`is_empty()`]: RangeInclusive::is_empty
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).end(), &5);
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_inclusive_range_methods", since = "1.32.0")]
+ #[inline]
+ pub const fn end(&self) -> &Idx {
+ &self.end
+ }
+
+ /// Destructures the `RangeInclusive` into (lower bound, upper (inclusive) bound).
+ ///
+ /// Note: the value returned by this method is unspecified after the range
+ /// has been iterated to exhaustion.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!((3..=5).into_inner(), (3, 5));
+ /// ```
+ #[stable(feature = "inclusive_range_methods", since = "1.27.0")]
+ #[inline]
+ pub fn into_inner(self) -> (Idx, Idx) {
+ (self.start, self.end)
+ }
+}
+
+impl RangeInclusive<usize> {
+    /// Converts to an exclusive `Range` for `SliceIndex` implementations.
+    /// The caller is responsible for dealing with `end == usize::MAX`.
+    #[inline]
+    pub(crate) fn into_slice_range(self) -> Range<usize> {
+        // If we're not exhausted, we want to simply slice `start..end + 1`.
+        // If we are exhausted, then slicing with `end + 1..end + 1` gives us an
+        // empty range that is still subject to bounds-checks for that endpoint.
+        //
+        // NOTE: `self.end + 1` overflows when `end == usize::MAX`; per the doc
+        // comment above, callers must rule that case out before calling.
+        let exclusive_end = self.end + 1;
+        let start = if self.exhausted { exclusive_end } else { self.start };
+        start..exclusive_end
+    }
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeInclusive<Idx> {
+    // Renders as `start..=end`, with a trailing ` (exhausted)` marker once
+    // iteration has consumed the range, so debug output can distinguish an
+    // exhausted range from a merely empty one.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let suffix = if self.exhausted { " (exhausted)" } else { "" };
+        self.start.fmt(fmt)?;
+        write!(fmt, "..=")?;
+        self.end.fmt(fmt)?;
+        fmt.write_str(suffix)
+    }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeInclusive<Idx> {
+    /// Returns `true` if `item` is contained in the range.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!(!(3..=5).contains(&2));
+    /// assert!( (3..=5).contains(&3));
+    /// assert!( (3..=5).contains(&4));
+    /// assert!( (3..=5).contains(&5));
+    /// assert!(!(3..=5).contains(&6));
+    ///
+    /// assert!( (3..=3).contains(&3));
+    /// assert!(!(3..=2).contains(&3));
+    ///
+    /// assert!( (0.0..=1.0).contains(&1.0));
+    /// assert!(!(0.0..=1.0).contains(&f32::NAN));
+    /// assert!(!(0.0..=f32::NAN).contains(&0.0));
+    /// assert!(!(f32::NAN..=1.0).contains(&1.0));
+    /// ```
+    ///
+    /// This method always returns `false` after iteration has finished:
+    ///
+    /// ```
+    /// let mut r = 3..=5;
+    /// assert!(r.contains(&3) && r.contains(&5));
+    /// for _ in r.by_ref() {}
+    /// // Precise field values are unspecified here
+    /// assert!(!r.contains(&3) && !r.contains(&5));
+    /// ```
+    #[stable(feature = "range_contains", since = "1.35.0")]
+    pub fn contains<U>(&self, item: &U) -> bool
+    where
+        Idx: PartialOrd<U>,
+        U: ?Sized + PartialOrd<Idx>,
+    {
+        // Delegates to the `RangeBounds` impl, which also accounts for the
+        // `exhausted` flag via `end_bound`.
+        <Self as RangeBounds<Idx>>::contains(self, item)
+    }
+
+    /// Returns `true` if the range contains no items.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!(!(3..=5).is_empty());
+    /// assert!(!(3..=3).is_empty());
+    /// assert!( (3..=2).is_empty());
+    /// ```
+    ///
+    /// The range is empty if either side is incomparable:
+    ///
+    /// ```
+    /// assert!(!(3.0..=5.0).is_empty());
+    /// assert!( (3.0..=f32::NAN).is_empty());
+    /// assert!( (f32::NAN..=5.0).is_empty());
+    /// ```
+    ///
+    /// This method returns `true` after iteration has finished:
+    ///
+    /// ```
+    /// let mut r = 3..=5;
+    /// for _ in r.by_ref() {}
+    /// // Precise field values are unspecified here
+    /// assert!(r.is_empty());
+    /// ```
+    #[stable(feature = "range_is_empty", since = "1.47.0")]
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        // Written as `!(start <= end)` rather than `start > end` so that a
+        // range with an incomparable endpoint (e.g. NaN, where both `<=` and
+        // `>` are false) is reported as empty, matching the doc examples.
+        self.exhausted || !(self.start <= self.end)
+    }
+}
+
+/// A range only bounded inclusively above (`..=end`).
+///
+/// The `RangeToInclusive` `..=end` contains all values with `x <= end`.
+/// It cannot serve as an [`Iterator`] because it doesn't have a starting point.
+///
+/// # Examples
+///
+/// The `..=end` syntax is a `RangeToInclusive`:
+///
+/// ```
+/// assert_eq!((..=5), std::ops::RangeToInclusive{ end: 5 });
+/// ```
+///
+/// It does not have an [`IntoIterator`] implementation, so you can't use it in a
+/// `for` loop directly. This won't compile:
+///
+/// ```compile_fail,E0277
+/// // error[E0277]: the trait bound `std::ops::RangeToInclusive<{integer}>:
+/// // std::iter::Iterator` is not satisfied
+/// for i in ..=5 {
+/// // ...
+/// }
+/// ```
+///
+/// When used as a [slicing index], `RangeToInclusive` produces a slice of all
+/// array elements up to and including the index indicated by `end`.
+///
+/// ```
+/// let arr = [0, 1, 2, 3, 4];
+/// assert_eq!(arr[ .. ], [0, 1, 2, 3, 4]);
+/// assert_eq!(arr[ .. 3], [0, 1, 2 ]);
+/// assert_eq!(arr[ ..=3], [0, 1, 2, 3 ]); // This is a `RangeToInclusive`
+/// assert_eq!(arr[1.. ], [ 1, 2, 3, 4]);
+/// assert_eq!(arr[1.. 3], [ 1, 2 ]);
+/// assert_eq!(arr[1..=3], [ 1, 2, 3 ]);
+/// ```
+///
+/// [slicing index]: crate::slice::SliceIndex
+#[lang = "RangeToInclusive"]
+#[doc(alias = "..=")]
+// Unlike `RangeInclusive`, this type carries no iteration state (it is not an
+// iterator), so it can safely derive `Copy`.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+pub struct RangeToInclusive<Idx> {
+    /// The upper bound of the range (inclusive)
+    #[stable(feature = "inclusive_range", since = "1.26.0")]
+    pub end: Idx,
+}
+
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl<Idx: fmt::Debug> fmt::Debug for RangeToInclusive<Idx> {
+    // Renders as `..=end`.
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt.write_str("..=")?;
+        self.end.fmt(fmt)
+    }
+}
+
+impl<Idx: PartialOrd<Idx>> RangeToInclusive<Idx> {
+    /// Returns `true` if `item` is contained in the range.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!( (..=5).contains(&-1_000_000_000));
+    /// assert!( (..=5).contains(&5));
+    /// assert!(!(..=5).contains(&6));
+    ///
+    /// assert!( (..=1.0).contains(&1.0));
+    /// assert!(!(..=1.0).contains(&f32::NAN));
+    /// assert!(!(..=f32::NAN).contains(&0.5));
+    /// ```
+    #[stable(feature = "range_contains", since = "1.35.0")]
+    pub fn contains<U>(&self, item: &U) -> bool
+    where
+        Idx: PartialOrd<U>,
+        U: ?Sized + PartialOrd<Idx>,
+    {
+        // Delegates to the `RangeBounds` impl below (`Unbounded` start,
+        // `Included` end).
+        <Self as RangeBounds<Idx>>::contains(self, item)
+    }
+}
+
+// RangeToInclusive<Idx> cannot impl From<RangeTo<Idx>>
+// because underflow would be possible with (..0).into()
+
+/// An endpoint of a range of keys.
+///
+/// # Examples
+///
+/// `Bound`s are range endpoints:
+///
+/// ```
+/// use std::ops::Bound::*;
+/// use std::ops::RangeBounds;
+///
+/// assert_eq!((..100).start_bound(), Unbounded);
+/// assert_eq!((1..12).start_bound(), Included(&1));
+/// assert_eq!((1..12).end_bound(), Excluded(&12));
+/// ```
+///
+/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`].
+/// Note that in most cases, it's better to use range syntax (`1..5`) instead.
+///
+/// ```
+/// use std::collections::BTreeMap;
+/// use std::ops::Bound::{Excluded, Included, Unbounded};
+///
+/// let mut map = BTreeMap::new();
+/// map.insert(3, "a");
+/// map.insert(5, "b");
+/// map.insert(8, "c");
+///
+/// for (key, value) in map.range((Excluded(3), Included(8))) {
+/// println!("{}: {}", key, value);
+/// }
+///
+/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next());
+/// ```
+///
+/// [`BTreeMap::range`]: ../../std/collections/btree_map/struct.BTreeMap.html#method.range
+#[stable(feature = "collections_bound", since = "1.17.0")]
+// Deriving `Copy` means `Bound<T>` is `Copy` whenever `T` is (and `Bound<&T>`
+// always is, since references are `Copy`).
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
+pub enum Bound<T> {
+    /// An inclusive bound.
+    #[stable(feature = "collections_bound", since = "1.17.0")]
+    Included(#[stable(feature = "collections_bound", since = "1.17.0")] T),
+    /// An exclusive bound.
+    #[stable(feature = "collections_bound", since = "1.17.0")]
+    Excluded(#[stable(feature = "collections_bound", since = "1.17.0")] T),
+    /// An infinite endpoint. Indicates that there is no bound in this direction.
+    #[stable(feature = "collections_bound", since = "1.17.0")]
+    Unbounded,
+}
+
+impl<T: Clone> Bound<&T> {
+    /// Map a `Bound<&T>` to a `Bound<T>` by cloning the contents of the bound.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(bound_cloned)]
+    /// use std::ops::Bound::*;
+    /// use std::ops::RangeBounds;
+    ///
+    /// assert_eq!((1..12).start_bound(), Included(&1));
+    /// assert_eq!((1..12).start_bound().cloned(), Included(1));
+    /// ```
+    #[unstable(feature = "bound_cloned", issue = "61356")]
+    pub fn cloned(self) -> Bound<T> {
+        // The variant constructors are in scope via the module-level
+        // `use self::Bound::{Excluded, Included, Unbounded}`.
+        match self {
+            Included(x) => Included(x.clone()),
+            Excluded(x) => Excluded(x.clone()),
+            Unbounded => Unbounded,
+        }
+    }
+}
+
+/// `RangeBounds` is implemented by Rust's built-in range types, produced
+/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`.
+#[stable(feature = "collections_range", since = "1.28.0")]
+pub trait RangeBounds<T: ?Sized> {
+    /// Start index bound.
+    ///
+    /// Returns the start value as a `Bound`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # fn main() {
+    /// use std::ops::Bound::*;
+    /// use std::ops::RangeBounds;
+    ///
+    /// assert_eq!((..10).start_bound(), Unbounded);
+    /// assert_eq!((3..10).start_bound(), Included(&3));
+    /// # }
+    /// ```
+    #[stable(feature = "collections_range", since = "1.28.0")]
+    fn start_bound(&self) -> Bound<&T>;
+
+    /// End index bound.
+    ///
+    /// Returns the end value as a `Bound`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # fn main() {
+    /// use std::ops::Bound::*;
+    /// use std::ops::RangeBounds;
+    ///
+    /// assert_eq!((3..).end_bound(), Unbounded);
+    /// assert_eq!((3..10).end_bound(), Excluded(&10));
+    /// # }
+    /// ```
+    #[stable(feature = "collections_range", since = "1.28.0")]
+    fn end_bound(&self) -> Bound<&T>;
+
+    /// Performs bounds-checking of this range.
+    ///
+    /// The returned [`Range`] is safe to pass to [`slice::get_unchecked`] and
+    /// [`slice::get_unchecked_mut`] for slices of the given length.
+    ///
+    /// [`slice::get_unchecked`]: ../../std/primitive.slice.html#method.get_unchecked
+    /// [`slice::get_unchecked_mut`]: ../../std/primitive.slice.html#method.get_unchecked_mut
+    ///
+    /// # Panics
+    ///
+    /// Panics if the range would be out of bounds.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(range_bounds_assert_len)]
+    ///
+    /// use std::ops::RangeBounds;
+    ///
+    /// let v = [10, 40, 30];
+    /// assert_eq!(1..2, (1..2).assert_len(v.len()));
+    /// assert_eq!(0..2, (..2).assert_len(v.len()));
+    /// assert_eq!(1..3, (1..).assert_len(v.len()));
+    /// ```
+    ///
+    /// Panics when [`Index::index`] would panic:
+    ///
+    /// ```should_panic
+    /// #![feature(range_bounds_assert_len)]
+    ///
+    /// use std::ops::RangeBounds;
+    ///
+    /// (2..1).assert_len(3);
+    /// ```
+    ///
+    /// ```should_panic
+    /// #![feature(range_bounds_assert_len)]
+    ///
+    /// use std::ops::RangeBounds;
+    ///
+    /// (1..4).assert_len(3);
+    /// ```
+    ///
+    /// ```should_panic
+    /// #![feature(range_bounds_assert_len)]
+    ///
+    /// use std::ops::RangeBounds;
+    ///
+    /// (1..=usize::MAX).assert_len(3);
+    /// ```
+    ///
+    /// [`Index::index`]: crate::ops::Index::index
+    #[track_caller]
+    #[unstable(feature = "range_bounds_assert_len", issue = "76393")]
+    fn assert_len(self, len: usize) -> Range<usize>
+    where
+        Self: RangeBounds<usize>,
+    {
+        // Normalize the start to an inclusive index; an `Excluded` start of
+        // `usize::MAX` cannot be represented and fails the bounds check.
+        let start: Bound<&usize> = self.start_bound();
+        let start = match start {
+            Bound::Included(&start) => start,
+            Bound::Excluded(start) => {
+                start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
+            }
+            Bound::Unbounded => 0,
+        };
+
+        // Normalize the end to an exclusive index; an `Included` end of
+        // `usize::MAX` likewise overflows and fails.
+        let end: Bound<&usize> = self.end_bound();
+        let end = match end {
+            Bound::Included(end) => {
+                end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
+            }
+            Bound::Excluded(&end) => end,
+            Bound::Unbounded => len,
+        };
+
+        if start > end {
+            slice_index_order_fail(start, end);
+        }
+        if end > len {
+            slice_end_index_len_fail(end, len);
+        }
+
+        Range { start, end }
+    }
+
+    /// Returns `true` if `item` is contained in the range.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// assert!( (3..5).contains(&4));
+    /// assert!(!(3..5).contains(&2));
+    ///
+    /// assert!( (0.0..1.0).contains(&0.5));
+    /// assert!(!(0.0..1.0).contains(&f32::NAN));
+    /// assert!(!(0.0..f32::NAN).contains(&0.5));
+    /// assert!(!(f32::NAN..1.0).contains(&0.5));
+    /// ```
+    #[stable(feature = "range_contains", since = "1.35.0")]
+    fn contains<U>(&self, item: &U) -> bool
+    where
+        T: PartialOrd<U>,
+        U: ?Sized + PartialOrd<T>,
+    {
+        // Both endpoint checks must pass; incomparable values (e.g. NaN) make
+        // the comparison false, so the range reports "not contained".
+        (match self.start_bound() {
+            Included(ref start) => *start <= item,
+            Excluded(ref start) => *start < item,
+            Unbounded => true,
+        }) && (match self.end_bound() {
+            Included(ref end) => item <= *end,
+            Excluded(ref end) => item < *end,
+            Unbounded => true,
+        })
+    }
+}
+
+use self::Bound::{Excluded, Included, Unbounded};
+
+// `..`: unbounded on both sides.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T: ?Sized> RangeBounds<T> for RangeFull {
+    fn start_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+}
+
+// `start..`: inclusive start, no end.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeFrom<T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(&self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+}
+
+// `..end`: no start, exclusive end.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeTo<T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Excluded(&self.end)
+    }
+}
+
+// `start..end`: inclusive start, exclusive end.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for Range<T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(&self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Excluded(&self.end)
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeInclusive<T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(&self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        if self.exhausted {
+            // When the iterator is exhausted, we usually have start == end,
+            // but we want the range to appear empty, containing nothing.
+            // Switching the end to `Excluded` makes `contains` reject even
+            // the remaining endpoint value.
+            Excluded(&self.end)
+        } else {
+            Included(&self.end)
+        }
+    }
+}
+
+// `..=end`: no start, inclusive end.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeToInclusive<T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Included(&self.end)
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for (Bound<T>, Bound<T>) {
+    // A `(start, end)` pair of owned bounds; each accessor reborrows the
+    // corresponding element as a `Bound<&T>`.
+    fn start_bound(&self) -> Bound<&T> {
+        match &self.0 {
+            Included(start) => Included(start),
+            Excluded(start) => Excluded(start),
+            Unbounded => Unbounded,
+        }
+    }
+
+    fn end_bound(&self) -> Bound<&T> {
+        match &self.1 {
+            Included(end) => Included(end),
+            Excluded(end) => Excluded(end),
+            Unbounded => Unbounded,
+        }
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<'a, T: ?Sized + 'a> RangeBounds<T> for (Bound<&'a T>, Bound<&'a T>) {
+    // `Bound<&T>` is `Copy` (it holds only a reference), so the tuple
+    // elements can be returned by value directly.
+    fn start_bound(&self) -> Bound<&T> {
+        self.0
+    }
+
+    fn end_bound(&self) -> Bound<&T> {
+        self.1
+    }
+}
+
+// Implementations for ranges whose element type is itself a reference
+// (`Range<&T>` etc.): the stored `&T` is simply copied out, no extra
+// borrow of the range is needed.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeFrom<&T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeTo<&T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Excluded(self.end)
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for Range<&T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Excluded(self.end)
+    }
+}
+
+// NOTE(review): unlike `RangeInclusive<T>` above, this impl does not consult
+// `exhausted` — a `RangeInclusive<&T>` used purely as a bounds description.
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeInclusive<&T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Included(self.start)
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Included(self.end)
+    }
+}
+
+#[stable(feature = "collections_range", since = "1.28.0")]
+impl<T> RangeBounds<T> for RangeToInclusive<&T> {
+    fn start_bound(&self) -> Bound<&T> {
+        Unbounded
+    }
+    fn end_bound(&self) -> Bound<&T> {
+        Included(self.end)
+    }
+}
--- /dev/null
+/// A trait for customizing the behavior of the `?` operator.
+///
+/// A type implementing `Try` is one that has a canonical way to view it
+/// in terms of a success/failure dichotomy. This trait allows both
+/// extracting those success or failure values from an existing instance and
+/// creating a new instance from a success or failure value.
+#[unstable(feature = "try_trait", issue = "42327")]
+// The two `on(...)` clauses tailor the `?` diagnostic: the first fires when
+// the *enclosing function's* return type lacks `Try` (the desugaring calls
+// `from_error`/`from_ok` on it); the second fires when the value `?` is
+// *applied to* lacks `Try` (the desugaring calls `into_result` on it).
+#[rustc_on_unimplemented(
+    on(
+        all(
+            any(from_method = "from_error", from_method = "from_ok"),
+            from_desugaring = "QuestionMark"
+        ),
+        message = "the `?` operator can only be used in {ItemContext} \
+                   that returns `Result` or `Option` \
+                   (or another type that implements `{Try}`)",
+        label = "cannot use the `?` operator in {ItemContext} that returns `{Self}`",
+        enclosing_scope = "this function should return `Result` or `Option` to accept `?`"
+    ),
+    on(
+        all(from_method = "into_result", from_desugaring = "QuestionMark"),
+        message = "the `?` operator can only be applied to values \
+                   that implement `{Try}`",
+        label = "the `?` operator cannot be applied to type `{Self}`"
+    )
+)]
+#[doc(alias = "?")]
+#[lang = "try"]
+pub trait Try {
+    /// The type of this value when viewed as successful.
+    #[unstable(feature = "try_trait", issue = "42327")]
+    type Ok;
+    /// The type of this value when viewed as failed.
+    #[unstable(feature = "try_trait", issue = "42327")]
+    type Error;
+
+    /// Applies the "?" operator. A return of `Ok(t)` means that the
+    /// execution should continue normally, and the result of `?` is the
+    /// value `t`. A return of `Err(e)` means that execution should branch
+    /// to the innermost enclosing `catch`, or return from the function.
+    ///
+    /// If an `Err(e)` result is returned, the value `e` will be "wrapped"
+    /// in the return type of the enclosing scope (which must itself implement
+    /// `Try`). Specifically, the value `X::from_error(From::from(e))`
+    /// is returned, where `X` is the return type of the enclosing function.
+    #[lang = "into_result"]
+    #[unstable(feature = "try_trait", issue = "42327")]
+    fn into_result(self) -> Result<Self::Ok, Self::Error>;
+
+    /// Wrap an error value to construct the composite result. For example,
+    /// `Result::Err(x)` and `Result::from_error(x)` are equivalent.
+    #[lang = "from_error"]
+    #[unstable(feature = "try_trait", issue = "42327")]
+    fn from_error(v: Self::Error) -> Self;
+
+    /// Wrap an OK value to construct the composite result. For example,
+    /// `Result::Ok(x)` and `Result::from_ok(x)` are equivalent.
+    #[lang = "from_ok"]
+    #[unstable(feature = "try_trait", issue = "42327")]
+    fn from_ok(v: Self::Ok) -> Self;
+}
--- /dev/null
+use crate::marker::Unsize;
+
+/// Trait that indicates that this is a pointer or a wrapper for one,
+/// where unsizing can be performed on the pointee.
+///
+/// See the [DST coercion RFC][dst-coerce] and [the nomicon entry on coercion][nomicon-coerce]
+/// for more details.
+///
+/// For builtin pointer types, pointers to `T` will coerce to pointers to `U` if `T: Unsize<U>`
+/// by converting from a thin pointer to a fat pointer.
+///
+/// For custom types, the coercion here works by coercing `Foo<T>` to `Foo<U>`
+/// provided an impl of `CoerceUnsized<Foo<U>> for Foo<T>` exists.
+/// Such an impl can only be written if `Foo<T>` has only a single non-phantomdata
+/// field involving `T`. If the type of that field is `Bar<T>`, an implementation
+/// of `CoerceUnsized<Bar<U>> for Bar<T>` must exist. The coercion will work by
+/// coercing the `Bar<T>` field into `Bar<U>` and filling in the rest of the fields
+/// from `Foo<T>` to create a `Foo<U>`. This will effectively drill down to a pointer
+/// field and coerce that.
+///
+/// Generally, for smart pointers you will implement
+/// `CoerceUnsized<Ptr<U>> for Ptr<T> where T: Unsize<U>, U: ?Sized`, with an
+/// optional `?Sized` bound on `T` itself. For wrapper types that directly embed `T`
+/// like `Cell<T>` and `RefCell<T>`, you
+/// can directly implement `CoerceUnsized<Wrap<U>> for Wrap<T> where T: CoerceUnsized<U>`.
+/// This will let coercions of types like `Cell<Box<T>>` work.
+///
+/// [`Unsize`][unsize] is used to mark types which can be coerced to DSTs if behind
+/// pointers. It is implemented automatically by the compiler.
+///
+/// [dst-coerce]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
+/// [unsize]: crate::marker::Unsize
+/// [nomicon-coerce]: ../../nomicon/coercions.html
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T: ?Sized> {
+    // Empty. (Marker trait: the coercion behavior is driven by the compiler
+    // via the `#[lang]` attribute, not by any method.)
+}
+
+// There is no blanket/transitive impl; every built-in pointer conversion is
+// spelled out individually below. The `'b: 'a` bounds permit shortening the
+// borrow's lifetime as part of the coercion.
+
+// &mut T -> &mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+// &mut T -> &U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {}
+// &mut T -> *mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {}
+// &mut T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {}
+
+// &T -> &U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+// &T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {}
+
+// *mut T -> *mut U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+// *mut T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {}
+
+// *const T -> *const U
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+
+/// This is used for object safety, to check that a method's receiver type can be dispatched on.
+///
+/// An example implementation of the trait:
+///
+/// ```
+/// # #![feature(dispatch_from_dyn, unsize)]
+/// # use std::{ops::DispatchFromDyn, marker::Unsize};
+/// # struct Rc<T: ?Sized>(std::rc::Rc<T>);
+/// impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T>
+/// where
+///     T: Unsize<U>,
+/// {}
+/// ```
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {
+    // Empty. (Marker trait: the compiler consumes it via the `#[lang]`
+    // attribute; there are no methods to implement.)
+}
+
+// Built-in receiver types that support dynamic dispatch; each conversion is
+// listed explicitly, mirroring the `CoerceUnsized` impls above.
+// &T -> &U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
--- /dev/null
+//! Optional values.
+//!
+//! Type [`Option`] represents an optional value: every [`Option`]
+//! is either [`Some`] and contains a value, or [`None`], and
+//! does not. [`Option`] types are very common in Rust code, as
+//! they have a number of uses:
+//!
+//! * Initial values
+//! * Return values for functions that are not defined
+//! over their entire input range (partial functions)
+//! * Return value for otherwise reporting simple errors, where [`None`] is
+//! returned on error
+//! * Optional struct fields
+//! * Struct fields that can be loaned or "taken"
+//! * Optional function arguments
+//! * Nullable pointers
+//! * Swapping things out of difficult situations
+//!
+//! [`Option`]s are commonly paired with pattern matching to query the presence
+//! of a value and take action, always accounting for the [`None`] case.
+//!
+//! ```
+//! fn divide(numerator: f64, denominator: f64) -> Option<f64> {
+//! if denominator == 0.0 {
+//! None
+//! } else {
+//! Some(numerator / denominator)
+//! }
+//! }
+//!
+//! // The return value of the function is an option
+//! let result = divide(2.0, 3.0);
+//!
+//! // Pattern match to retrieve the value
+//! match result {
+//! // The division was valid
+//! Some(x) => println!("Result: {}", x),
+//! // The division was invalid
+//! None => println!("Cannot divide by 0"),
+//! }
+//! ```
+//!
+//
+// FIXME: Show how `Option` is used in practice, with lots of methods
+//
+//! # Options and pointers ("nullable" pointers)
+//!
+//! Rust's pointer types must always point to a valid location; there are
+//! no "null" references. Instead, Rust has *optional* pointers, like
+//! the optional owned box, [`Option`]`<`[`Box<T>`]`>`.
+//!
+//! The following example uses [`Option`] to create an optional box of
+//! [`i32`]. Notice that in order to use the inner [`i32`] value first, the
+//! `check_optional` function needs to use pattern matching to
+//! determine whether the box has a value (i.e., it is [`Some(...)`][`Some`]) or
+//! not ([`None`]).
+//!
+//! ```
+//! let optional = None;
+//! check_optional(optional);
+//!
+//! let optional = Some(Box::new(9000));
+//! check_optional(optional);
+//!
+//! fn check_optional(optional: Option<Box<i32>>) {
+//! match optional {
+//! Some(p) => println!("has value {}", p),
+//! None => println!("has no value"),
+//! }
+//! }
+//! ```
+//!
+//! # Representation
+//!
+//! Rust guarantees to optimize the following types `T` such that
+//! [`Option<T>`] has the same size as `T`:
+//!
+//! * [`Box<U>`]
+//! * `&U`
+//! * `&mut U`
+//! * `fn`, `extern "C" fn`
+//! * [`num::NonZero*`]
+//! * [`ptr::NonNull<U>`]
+//! * `#[repr(transparent)]` struct around one of the types in this list.
+//!
+//! It is further guaranteed that, for the cases above, one can
+//! [`mem::transmute`] from all valid values of `T` to `Option<T>` and
+//! from `Some::<T>(_)` to `T` (but transmuting `None::<T>` to `T`
+//! is undefined behaviour).
+//!
+//! # Examples
+//!
+//! Basic pattern matching on [`Option`]:
+//!
+//! ```
+//! let msg = Some("howdy");
+//!
+//! // Take a reference to the contained string
+//! if let Some(m) = &msg {
+//! println!("{}", *m);
+//! }
+//!
+//! // Remove the contained string, destroying the Option
+//! let unwrapped_msg = msg.unwrap_or("default message");
+//! ```
+//!
+//! Initialize a result to [`None`] before a loop:
+//!
+//! ```
+//! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) }
+//!
+//! // A list of data to search through.
+//! let all_the_big_things = [
+//! Kingdom::Plant(250, "redwood"),
+//! Kingdom::Plant(230, "noble fir"),
+//! Kingdom::Plant(229, "sugar pine"),
+//! Kingdom::Animal(25, "blue whale"),
+//! Kingdom::Animal(19, "fin whale"),
+//! Kingdom::Animal(15, "north pacific right whale"),
+//! ];
+//!
+//! // We're going to search for the name of the biggest animal,
+//! // but to start with we've just got `None`.
+//! let mut name_of_biggest_animal = None;
+//! let mut size_of_biggest_animal = 0;
+//! for big_thing in &all_the_big_things {
+//! match *big_thing {
+//! Kingdom::Animal(size, name) if size > size_of_biggest_animal => {
+//! // Now we've found the name of some big animal
+//! size_of_biggest_animal = size;
+//! name_of_biggest_animal = Some(name);
+//! }
+//! Kingdom::Animal(..) | Kingdom::Plant(..) => ()
+//! }
+//! }
+//!
+//! match name_of_biggest_animal {
+//! Some(name) => println!("the biggest animal is {}", name),
+//! None => println!("there are no animals :("),
+//! }
+//! ```
+//!
+//! [`Box<T>`]: ../../std/boxed/struct.Box.html
+//! [`Box<U>`]: ../../std/boxed/struct.Box.html
+//! [`num::NonZero*`]: crate::num
+//! [`ptr::NonNull<U>`]: crate::ptr::NonNull
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::iter::{FromIterator, FusedIterator, TrustedLen};
+use crate::pin::Pin;
+use crate::{
+ convert, fmt, hint, mem,
+ ops::{self, Deref, DerefMut},
+};
+
+/// The `Option` type. See [the module level documentation](self) for more.
+#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+#[rustc_diagnostic_item = "option_type"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub enum Option<T> {
+    /// No value
+    // The `#[lang]` attributes register the variants as lang items so the
+    // compiler can refer to them directly (e.g. in desugarings).
+    #[lang = "None"]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    None,
+    /// Some value `T`
+    #[lang = "Some"]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    Some(#[stable(feature = "rust1", since = "1.0.0")] T),
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Type implementation
+/////////////////////////////////////////////////////////////////////////////
+
+impl<T> Option<T> {
+ /////////////////////////////////////////////////////////////////////////
+ // Querying the contained values
+ /////////////////////////////////////////////////////////////////////////
+
+    /// Returns `true` if the option is a [`Some`] value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let x: Option<u32> = Some(2);
+    /// assert_eq!(x.is_some(), true);
+    ///
+    /// let x: Option<u32> = None;
+    /// assert_eq!(x.is_some(), false);
+    /// ```
+    #[must_use = "if you intended to assert that this has a value, consider `.unwrap()` instead"]
+    #[inline]
+    #[rustc_const_stable(feature = "const_option", since = "1.48.0")]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const fn is_some(&self) -> bool {
+        // An explicit match instead of `matches!`; both reduce to the same
+        // discriminant test.
+        match *self {
+            Some(_) => true,
+            None => false,
+        }
+    }
+
+    /// Returns `true` if the option is a [`None`] value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let x: Option<u32> = Some(2);
+    /// assert_eq!(x.is_none(), false);
+    ///
+    /// let x: Option<u32> = None;
+    /// assert_eq!(x.is_none(), true);
+    /// ```
+    #[must_use = "if you intended to assert that this doesn't have a value, consider \
+                  `.and_then(|| panic!(\"`Option` had a value when expected `None`\"))` instead"]
+    #[inline]
+    #[rustc_const_stable(feature = "const_option", since = "1.48.0")]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const fn is_none(&self) -> bool {
+        // Dual of `is_some`: true exactly when the discriminant is `None`.
+        matches!(*self, None)
+    }
+
+    /// Returns `true` if the option is a [`Some`] value containing the given value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(option_result_contains)]
+    ///
+    /// let x: Option<u32> = Some(2);
+    /// assert_eq!(x.contains(&2), true);
+    ///
+    /// let x: Option<u32> = Some(3);
+    /// assert_eq!(x.contains(&2), false);
+    ///
+    /// let x: Option<u32> = None;
+    /// assert_eq!(x.contains(&2), false);
+    /// ```
+    #[must_use]
+    #[inline]
+    #[unstable(feature = "option_result_contains", issue = "62358")]
+    pub fn contains<U>(&self, x: &U) -> bool
+    where
+        U: PartialEq<T>,
+    {
+        // Borrow the payload if present; `None` short-circuits to `false`.
+        if let Some(y) = self { x == y } else { false }
+    }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for working with references
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `&Option<T>` to `Option<&T>`.
+ ///
+ /// # Examples
+ ///
+ /// Computes the length of an `Option<`[`String`]`>` as an
+ /// `Option<`[`usize`]`>` without giving up ownership of the original.
+ /// [`map`] consumes `self`, so `as_ref` is used first to obtain an
+ /// `Option` holding a reference into the original.
+ ///
+ /// [`map`]: Option::map
+ /// [`String`]: ../../std/string/struct.String.html
+ ///
+ /// ```
+ /// let text: Option<String> = Some("Hello, world!".to_string());
+ /// // First, cast `Option<String>` to `Option<&String>` with `as_ref`,
+ /// // then consume *that* with `map`, leaving `text` on the stack.
+ /// let text_length: Option<usize> = text.as_ref().map(|s| s.len());
+ /// println!("still can print text: {:?}", text);
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_option", since = "1.48.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn as_ref(&self) -> Option<&T> {
+ // Match ergonomics on `&self` bind `x` as `&T`.
+ match self {
+ Some(x) => Some(x),
+ None => None,
+ }
+ }
+
+ /// Converts from `&mut Option<T>` to `Option<&mut T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// match x.as_mut() {
+ /// Some(v) => *v = 42,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Some(42));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_mut(&mut self) -> Option<&mut T> {
+ // Match ergonomics on `&mut self` bind `x` as `&mut T`.
+ match self {
+ Some(x) => Some(x),
+ None => None,
+ }
+ }
+
+ /// Converts from [`Pin`]`<&Option<T>>` to `Option<`[`Pin`]`<&T>>`.
+ ///
+ /// The returned `Pin` (if any) wraps a reference to the contained value,
+ /// inheriting the pinning guarantee from `self`.
+ #[inline]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub fn as_pin_ref(self: Pin<&Self>) -> Option<Pin<&T>> {
+ // SAFETY: `x` is guaranteed to be pinned because it comes from `self`
+ // which is pinned.
+ unsafe { Pin::get_ref(self).as_ref().map(|x| Pin::new_unchecked(x)) }
+ }
+
+ /// Converts from [`Pin`]`<&mut Option<T>>` to `Option<`[`Pin`]`<&mut T>>`.
+ ///
+ /// The returned `Pin` (if any) wraps a mutable reference to the contained
+ /// value, inheriting the pinning guarantee from `self`.
+ #[inline]
+ #[stable(feature = "pin", since = "1.33.0")]
+ pub fn as_pin_mut(self: Pin<&mut Self>) -> Option<Pin<&mut T>> {
+ // SAFETY: `get_unchecked_mut` is never used to move the `Option` inside `self`.
+ // `x` is guaranteed to be pinned because it comes from `self` which is pinned.
+ unsafe { Pin::get_unchecked_mut(self).as_mut().map(|x| Pin::new_unchecked(x)) }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Getting to contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Consumes `self` and returns the contained [`Some`] value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is a [`None`] with a custom panic message provided by
+ /// `msg`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("value");
+ /// assert_eq!(x.expect("fruits are healthy"), "value");
+ /// ```
+ ///
+ /// ```{.should_panic}
+ /// let x: Option<&str> = None;
+ /// x.expect("fruits are healthy"); // panics with `fruits are healthy`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn expect(self, msg: &str) -> T {
+ // `expect_failed` diverges, so the else branch never produces a value.
+ if let Some(val) = self { val } else { expect_failed(msg) }
+ }
+
+ /// Returns the contained [`Some`] value, consuming the `self` value.
+ ///
+ /// Because this function may panic, its use is generally discouraged.
+ /// Instead, prefer to use pattern matching and handle the [`None`]
+ /// case explicitly, or call [`unwrap_or`], [`unwrap_or_else`], or
+ /// [`unwrap_or_default`].
+ ///
+ /// [`unwrap_or`]: Option::unwrap_or
+ /// [`unwrap_or_else`]: Option::unwrap_or_else
+ /// [`unwrap_or_default`]: Option::unwrap_or_default
+ ///
+ /// # Panics
+ ///
+ /// Panics if the self value equals [`None`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("air");
+ /// assert_eq!(x.unwrap(), "air");
+ /// ```
+ ///
+ /// ```{.should_panic}
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.unwrap(), "air"); // fails
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ pub const fn unwrap(self) -> T {
+ match self {
+ Some(val) => val,
+ // Panicking directly here (rather than via a helper like
+ // `expect_failed`) keeps the body valid in const evaluation.
+ None => panic!("called `Option::unwrap()` on a `None` value"),
+ }
+ }
+
+ /// Returns the contained [`Some`] value, or `default` if the option is
+ /// [`None`].
+ ///
+ /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`unwrap_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`unwrap_or_else`]: Option::unwrap_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Some("car").unwrap_or("bike"), "car");
+ /// assert_eq!(None.unwrap_or("bike"), "bike");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or(self, default: T) -> T {
+ if let Some(x) = self { x } else { default }
+ }
+
+ /// Returns the contained [`Some`] value, or the result of calling `f` if
+ /// the option is [`None`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let k = 10;
+ /// assert_eq!(Some(4).unwrap_or_else(|| 2 * k), 4);
+ /// assert_eq!(None.unwrap_or_else(|| 2 * k), 20);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
+ // `f` is only invoked on the `None` path, making it lazy.
+ if let Some(x) = self { x } else { f() }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Transforming contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Maps an `Option<T>` to `Option<U>` by applying `f` to a contained
+ /// value, leaving a [`None`] untouched.
+ ///
+ /// # Examples
+ ///
+ /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original:
+ ///
+ /// [`String`]: ../../std/string/struct.String.html
+ /// ```
+ /// let maybe_some_string = Some(String::from("Hello, World!"));
+ /// // `Option::map` takes self *by value*, consuming `maybe_some_string`
+ /// let maybe_some_len = maybe_some_string.map(|s| s.len());
+ ///
+ /// assert_eq!(maybe_some_len, Some(13));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Option<U> {
+ if let Some(x) = self { Some(f(x)) } else { None }
+ }
+
+ /// Applies `f` to the contained value if there is one, otherwise returns
+ /// the provided `default`.
+ ///
+ /// Arguments passed to `map_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`map_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`map_or_else`]: Option::map_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 3);
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.map_or(42, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
+ if let Some(t) = self { f(t) } else { default }
+ }
+
+ /// Applies `f` to the contained value if there is one, otherwise calls
+ /// `default` to compute a fallback.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let k = 21;
+ ///
+ /// let x = Some("foo");
+ /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 3);
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.map_or_else(|| 2 * k, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map_or_else<U, D: FnOnce() -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
+ // Exactly one of the two closures runs.
+ if let Some(t) = self { f(t) } else { default() }
+ }
+
+ /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
+ /// [`Ok(v)`] and [`None`] to [`Err(err)`].
+ ///
+ /// Arguments passed to `ok_or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`ok_or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`Result<T, E>`]: Result
+ /// [`Ok(v)`]: Ok
+ /// [`Err(err)`]: Err
+ /// [`Some(v)`]: Some
+ /// [`ok_or_else`]: Option::ok_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.ok_or(0), Ok("foo"));
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.ok_or(0), Err(0));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ok_or<E>(self, err: E) -> Result<T, E> {
+ if let Some(v) = self { Ok(v) } else { Err(err) }
+ }
+
+ /// Transforms the `Option<T>` into a [`Result<T, E>`], mapping [`Some(v)`] to
+ /// [`Ok(v)`] and [`None`] to [`Err(err())`].
+ ///
+ /// [`Result<T, E>`]: Result
+ /// [`Ok(v)`]: Ok
+ /// [`Err(err())`]: Err
+ /// [`Some(v)`]: Some
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("foo");
+ /// assert_eq!(x.ok_or_else(|| 0), Ok("foo"));
+ ///
+ /// let x: Option<&str> = None;
+ /// assert_eq!(x.ok_or_else(|| 0), Err(0));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ok_or_else<E, F: FnOnce() -> E>(self, err: F) -> Result<T, E> {
+ // `err` is only invoked on the `None` path.
+ if let Some(v) = self { Ok(v) } else { Err(err()) }
+ }
+
+ /// Inserts `value` into the option then returns a mutable reference to it.
+ ///
+ /// If the option already contains a value, the old value is dropped.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![feature(option_insert)]
+ ///
+ /// let mut opt = None;
+ /// let val = opt.insert(1);
+ /// assert_eq!(*val, 1);
+ /// assert_eq!(opt.unwrap(), 1);
+ /// let val = opt.insert(2);
+ /// assert_eq!(*val, 2);
+ /// *val = 3;
+ /// assert_eq!(opt.unwrap(), 3);
+ /// ```
+ #[inline]
+ #[unstable(feature = "option_insert", reason = "newly added", issue = "78271")]
+ pub fn insert(&mut self, value: T) -> &mut T {
+ // Unconditional overwrite: any previous contained value is dropped here.
+ *self = Some(value);
+
+ match self {
+ Some(v) => v,
+ // SAFETY: the code above just filled the option
+ None => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Iterator constructors
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns an iterator over the possibly contained value.
+ ///
+ /// The iterator yields the contained value by reference, at most once.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(4);
+ /// assert_eq!(x.iter().next(), Some(&4));
+ ///
+ /// let x: Option<u32> = None;
+ /// assert_eq!(x.iter().next(), None);
+ /// ```
+ #[inline]
+ #[rustc_const_unstable(feature = "const_option", issue = "67441")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn iter(&self) -> Iter<'_, T> {
+ // `Item` is the private zero-or-one-element iterator defined at the
+ // bottom of this file; `Iter` merely wraps it.
+ Iter { inner: Item { opt: self.as_ref() } }
+ }
+
+ /// Returns a mutable iterator over the possibly contained value.
+ ///
+ /// The iterator yields the contained value by mutable reference, at most once.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(4);
+ /// match x.iter_mut().next() {
+ /// Some(v) => *v = 42,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Some(42));
+ ///
+ /// let mut x: Option<u32> = None;
+ /// assert_eq!(x.iter_mut().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { inner: Item { opt: self.as_mut() } }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Boolean operations on the values, eager and lazy
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns [`None`] if the option is [`None`], otherwise returns `optb`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y: Option<&str> = None;
+ /// assert_eq!(x.and(y), None);
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = Some("foo");
+ /// assert_eq!(x.and(y), None);
+ ///
+ /// let x = Some(2);
+ /// let y = Some("foo");
+ /// assert_eq!(x.and(y), Some("foo"));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y: Option<&str> = None;
+ /// assert_eq!(x.and(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn and<U>(self, optb: Option<U>) -> Option<U> {
+ // A contained value of `self` is discarded; only its presence matters.
+ match self {
+ None => None,
+ Some(_) => optb,
+ }
+ }
+
+ /// Returns [`None`] if the option is [`None`], otherwise calls `f` with the
+ /// wrapped value and returns the result.
+ ///
+ /// Some languages call this operation flatmap.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn sq(x: u32) -> Option<u32> { Some(x * x) }
+ /// fn nope(_: u32) -> Option<u32> { None }
+ ///
+ /// assert_eq!(Some(2).and_then(sq).and_then(sq), Some(16));
+ /// assert_eq!(Some(2).and_then(sq).and_then(nope), None);
+ /// assert_eq!(Some(2).and_then(nope).and_then(sq), None);
+ /// assert_eq!(None.and_then(sq).and_then(sq), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn and_then<U, F: FnOnce(T) -> Option<U>>(self, f: F) -> Option<U> {
+ if let Some(x) = self { f(x) } else { None }
+ }
+
+ /// Returns [`None`] if the option is [`None`], otherwise calls `predicate`
+ /// with the wrapped value and returns:
+ ///
+ /// - [`Some(t)`] if `predicate` returns `true` (where `t` is the wrapped
+ /// value), and
+ /// - [`None`] if `predicate` returns `false`.
+ ///
+ /// This function works similar to [`Iterator::filter()`]. You can imagine
+ /// the `Option<T>` being an iterator over one or zero elements. `filter()`
+ /// lets you decide which elements to keep.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// fn is_even(n: &i32) -> bool {
+ /// n % 2 == 0
+ /// }
+ ///
+ /// assert_eq!(None.filter(is_even), None);
+ /// assert_eq!(Some(3).filter(is_even), None);
+ /// assert_eq!(Some(4).filter(is_even), Some(4));
+ /// ```
+ ///
+ /// [`Some(t)`]: Some
+ #[inline]
+ #[stable(feature = "option_filter", since = "1.27.0")]
+ pub fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
+ // A match guard lets the predicate decide the arm while the value is
+ // only borrowed, so it can still be moved back out on success.
+ match self {
+ Some(x) if predicate(&x) => Some(x),
+ _ => None,
+ }
+ }
+
+ /// Returns the option if it contains a value, otherwise returns `optb`.
+ ///
+ /// Arguments passed to `or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`or_else`]: Option::or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y = None;
+ /// assert_eq!(x.or(y), Some(2));
+ ///
+ /// let x = None;
+ /// let y = Some(100);
+ /// assert_eq!(x.or(y), Some(100));
+ ///
+ /// let x = Some(2);
+ /// let y = Some(100);
+ /// assert_eq!(x.or(y), Some(2));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = None;
+ /// assert_eq!(x.or(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or(self, optb: Option<T>) -> Option<T> {
+ if self.is_some() { self } else { optb }
+ }
+
+ /// Returns the option if it contains a value, otherwise calls `f` and
+ /// returns the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// fn nobody() -> Option<&'static str> { None }
+ /// fn vikings() -> Option<&'static str> { Some("vikings") }
+ ///
+ /// assert_eq!(Some("barbarians").or_else(vikings), Some("barbarians"));
+ /// assert_eq!(None.or_else(vikings), Some("vikings"));
+ /// assert_eq!(None.or_else(nobody), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_else<F: FnOnce() -> Option<T>>(self, f: F) -> Option<T> {
+ // `f` is only invoked on the `None` path.
+ if self.is_some() { self } else { f() }
+ }
+
+ /// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns [`None`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(2);
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x: Option<u32> = None;
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), Some(2));
+ ///
+ /// let x = Some(2);
+ /// let y = Some(2);
+ /// assert_eq!(x.xor(y), None);
+ ///
+ /// let x: Option<u32> = None;
+ /// let y: Option<u32> = None;
+ /// assert_eq!(x.xor(y), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "option_xor", since = "1.37.0")]
+ pub fn xor(self, optb: Option<T>) -> Option<T> {
+ // An or-pattern captures whichever side is the lone `Some`; both-`Some`
+ // and both-`None` fall through to `None`.
+ match (self, optb) {
+ (a @ Some(_), None) | (None, a @ Some(_)) => a,
+ _ => None,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Entry-like operations to insert if None and return a reference
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Inserts `value` into the option if it is [`None`], then
+ /// returns a mutable reference to the contained value.
+ ///
+ /// Unlike `insert`, an already-present value is kept, not overwritten.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert(5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[stable(feature = "option_entry", since = "1.20.0")]
+ pub fn get_or_insert(&mut self, value: T) -> &mut T {
+ // Delegates to the lazy variant; `value` was already evaluated by the
+ // caller, so wrapping it in a closure costs nothing.
+ self.get_or_insert_with(|| value)
+ }
+
+ /// Inserts a value computed from `f` into the option if it is [`None`],
+ /// then returns a mutable reference to the contained value.
+ ///
+ /// `f` is only invoked when the option is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = None;
+ ///
+ /// {
+ /// let y: &mut u32 = x.get_or_insert_with(|| 5);
+ /// assert_eq!(y, &5);
+ ///
+ /// *y = 7;
+ /// }
+ ///
+ /// assert_eq!(x, Some(7));
+ /// ```
+ #[inline]
+ #[stable(feature = "option_entry", since = "1.20.0")]
+ pub fn get_or_insert_with<F: FnOnce() -> T>(&mut self, f: F) -> &mut T {
+ // `is_none()` replaces the redundant `if let None = *self` pattern
+ // match; same behavior, clearer intent.
+ if self.is_none() {
+ *self = Some(f());
+ }
+
+ match self {
+ Some(v) => v,
+ // SAFETY: a `None` variant for `self` would have been replaced by a `Some`
+ // variant in the code above.
+ None => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Misc
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Takes the value out of the option, leaving a [`None`] in its place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// let y = x.take();
+ /// assert_eq!(x, None);
+ /// assert_eq!(y, Some(2));
+ ///
+ /// let mut x: Option<u32> = None;
+ /// let y = x.take();
+ /// assert_eq!(x, None);
+ /// assert_eq!(y, None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn take(&mut self) -> Option<T> {
+ // Equivalent to `mem::take(self)`: `Option`'s `Default` is `None`, so
+ // swapping `None` in returns the previous contents.
+ mem::replace(self, None)
+ }
+
+ /// Replaces the actual value in the option by the value given in parameter,
+ /// returning the old value if present,
+ /// leaving a [`Some`] in its place without deinitializing either one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = Some(2);
+ /// let old = x.replace(5);
+ /// assert_eq!(x, Some(5));
+ /// assert_eq!(old, Some(2));
+ ///
+ /// let mut x = None;
+ /// let old = x.replace(3);
+ /// assert_eq!(x, Some(3));
+ /// assert_eq!(old, None);
+ /// ```
+ #[inline]
+ #[stable(feature = "option_replace", since = "1.31.0")]
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ // Swap the new value in and hand the previous contents back out.
+ mem::replace(self, Some(value))
+ }
+
+ /// Zips `self` with another `Option`.
+ ///
+ /// If `self` is `Some(s)` and `other` is `Some(o)`, this method returns `Some((s, o))`.
+ /// Otherwise, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some(1);
+ /// let y = Some("hi");
+ /// let z = None::<u8>;
+ ///
+ /// assert_eq!(x.zip(y), Some((1, "hi")));
+ /// assert_eq!(x.zip(z), None);
+ /// ```
+ #[stable(feature = "option_zip_option", since = "1.46.0")]
+ pub fn zip<U>(self, other: Option<U>) -> Option<(T, U)> {
+ // `?` short-circuits to `None` as soon as either side is empty.
+ Some((self?, other?))
+ }
+
+ /// Zips `self` and another `Option` with function `f`.
+ ///
+ /// If `self` is `Some(s)` and `other` is `Some(o)`, this method returns `Some(f(s, o))`.
+ /// Otherwise, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_zip)]
+ ///
+ /// #[derive(Debug, PartialEq)]
+ /// struct Point {
+ /// x: f64,
+ /// y: f64,
+ /// }
+ ///
+ /// impl Point {
+ /// fn new(x: f64, y: f64) -> Self {
+ /// Self { x, y }
+ /// }
+ /// }
+ ///
+ /// let x = Some(17.5);
+ /// let y = Some(42.7);
+ ///
+ /// assert_eq!(x.zip_with(y, Point::new), Some(Point { x: 17.5, y: 42.7 }));
+ /// assert_eq!(x.zip_with(None, Point::new), None);
+ /// ```
+ #[unstable(feature = "option_zip", issue = "70086")]
+ pub fn zip_with<U, F, R>(self, other: Option<U>, f: F) -> Option<R>
+ where
+ F: FnOnce(T, U) -> R,
+ {
+ // `f` only runs when both sides contain a value.
+ match (self, other) {
+ (Some(a), Some(b)) => Some(f(a, b)),
+ _ => None,
+ }
+ }
+}
+
+impl<T: Copy> Option<&T> {
+ /// Maps an `Option<&T>` to an `Option<T>` by copying the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 12;
+ /// let opt_x = Some(&x);
+ /// assert_eq!(opt_x, Some(&12));
+ /// let copied = opt_x.copied();
+ /// assert_eq!(copied, Some(12));
+ /// ```
+ #[stable(feature = "copied", since = "1.35.0")]
+ pub fn copied(self) -> Option<T> {
+ // A deref pattern copies the pointee out (`T: Copy`).
+ match self {
+ Some(&v) => Some(v),
+ None => None,
+ }
+ }
+}
+
+impl<T: Copy> Option<&mut T> {
+ /// Maps an `Option<&mut T>` to an `Option<T>` by copying the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = 12;
+ /// let opt_x = Some(&mut x);
+ /// assert_eq!(opt_x, Some(&mut 12));
+ /// let copied = opt_x.copied();
+ /// assert_eq!(copied, Some(12));
+ /// ```
+ #[stable(feature = "copied", since = "1.35.0")]
+ pub fn copied(self) -> Option<T> {
+ // A deref pattern copies the pointee out (`T: Copy`).
+ match self {
+ Some(&mut v) => Some(v),
+ None => None,
+ }
+ }
+}
+
+impl<T: Clone> Option<&T> {
+ /// Maps an `Option<&T>` to an `Option<T>` by cloning the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = 12;
+ /// let opt_x = Some(&x);
+ /// assert_eq!(opt_x, Some(&12));
+ /// let cloned = opt_x.cloned();
+ /// assert_eq!(cloned, Some(12));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn cloned(self) -> Option<T> {
+ match self {
+ Some(t) => Some(t.clone()),
+ None => None,
+ }
+ }
+}
+
+impl<T: Clone> Option<&mut T> {
+ /// Maps an `Option<&mut T>` to an `Option<T>` by cloning the contents of the
+ /// option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x = 12;
+ /// let opt_x = Some(&mut x);
+ /// assert_eq!(opt_x, Some(&mut 12));
+ /// let cloned = opt_x.cloned();
+ /// assert_eq!(cloned, Some(12));
+ /// ```
+ #[stable(since = "1.26.0", feature = "option_ref_mut_cloned")]
+ pub fn cloned(self) -> Option<T> {
+ match self {
+ Some(t) => Some(t.clone()),
+ None => None,
+ }
+ }
+}
+
+impl<T: fmt::Debug> Option<T> {
+ /// Consumes `self`, asserting that it is [`None`] and returning nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is a [`Some`], with a panic message made up of the
+ /// passed `msg` followed by the content of the [`Some`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_expect_none)]
+ ///
+ /// use std::collections::HashMap;
+ /// let mut squares = HashMap::new();
+ /// for i in -10..=10 {
+ /// // This will not panic, since all keys are unique.
+ /// squares.insert(i, i * i).expect_none("duplicate key");
+ /// }
+ /// ```
+ ///
+ /// ```{.should_panic}
+ /// #![feature(option_expect_none)]
+ ///
+ /// use std::collections::HashMap;
+ /// let mut sqrts = HashMap::new();
+ /// for i in -10..=10 {
+ /// // This will panic, since both negative and positive `i` will
+ /// // insert the same `i * i` key, returning the old `Some(i)`.
+ /// sqrts.insert(i * i, i).expect_none("duplicate key");
+ /// }
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[unstable(feature = "option_expect_none", reason = "newly added", issue = "62633")]
+ pub fn expect_none(self, msg: &str) {
+ // `expect_none_failed` diverges; the `None` arm is the only way out.
+ match self {
+ Some(val) => expect_none_failed(msg, &val),
+ None => {}
+ }
+ }
+
+ /// Consumes `self`, asserting that it is [`None`] and returning nothing.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is a [`Some`], with a panic message that includes
+ /// the content of the [`Some`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_unwrap_none)]
+ ///
+ /// use std::collections::HashMap;
+ /// let mut squares = HashMap::new();
+ /// for i in -10..=10 {
+ /// // This will not panic, since all keys are unique.
+ /// squares.insert(i, i * i).unwrap_none();
+ /// }
+ /// ```
+ ///
+ /// ```{.should_panic}
+ /// #![feature(option_unwrap_none)]
+ ///
+ /// use std::collections::HashMap;
+ /// let mut sqrts = HashMap::new();
+ /// for i in -10..=10 {
+ /// // This will panic, since both negative and positive `i` will
+ /// // insert the same `i * i` key, returning the old `Some(i)`.
+ /// sqrts.insert(i * i, i).unwrap_none();
+ /// }
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[unstable(feature = "option_unwrap_none", reason = "newly added", issue = "62633")]
+ pub fn unwrap_none(self) {
+ match self {
+ Some(val) => expect_none_failed("called `Option::unwrap_none()` on a `Some` value", &val),
+ None => {}
+ }
+ }
+}
+
+impl<T: Default> Option<T> {
+ /// Returns the contained [`Some`] value or a default.
+ ///
+ /// Consumes the `self` argument then, if [`Some`], returns the contained
+ /// value, otherwise if [`None`], returns the [default value] for that
+ /// type.
+ ///
+ /// # Examples
+ ///
+ /// Converts a string to an integer, turning poorly-formed strings
+ /// into 0 (the default value for integers). [`parse`] converts
+ /// a string to any other type that implements [`FromStr`], returning
+ /// [`None`] on error.
+ ///
+ /// ```
+ /// let good_year_from_input = "1909";
+ /// let bad_year_from_input = "190blarg";
+ /// let good_year = good_year_from_input.parse().ok().unwrap_or_default();
+ /// let bad_year = bad_year_from_input.parse().ok().unwrap_or_default();
+ ///
+ /// assert_eq!(1909, good_year);
+ /// assert_eq!(0, bad_year);
+ /// ```
+ ///
+ /// [default value]: Default::default
+ /// [`parse`]: str::parse
+ /// [`FromStr`]: crate::str::FromStr
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or_default(self) -> T {
+ // The default is only constructed on the `None` path.
+ self.unwrap_or_else(T::default)
+ }
+}
+
+impl<T: Deref> Option<T> {
+ /// Converts from `Option<T>` (or `&Option<T>`) to `Option<&T::Target>`.
+ ///
+ /// Leaves the original Option in-place, creating a new one with a reference
+ /// to the original one, additionally coercing the contents via [`Deref`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Option<String> = Some("hey".to_owned());
+ /// assert_eq!(x.as_deref(), Some("hey"));
+ ///
+ /// let x: Option<String> = None;
+ /// assert_eq!(x.as_deref(), None);
+ /// ```
+ #[stable(feature = "option_deref", since = "1.40.0")]
+ pub fn as_deref(&self) -> Option<&T::Target> {
+ // Borrow first, then run the contained value through `Deref`.
+ match self.as_ref() {
+ Some(t) => Some(t.deref()),
+ None => None,
+ }
+ }
+}
+
+impl<T: DerefMut> Option<T> {
+ /// Converts from `Option<T>` (or `&mut Option<T>`) to `Option<&mut T::Target>`.
+ ///
+ /// Leaves the original `Option` in-place, creating a new one containing a mutable reference to
+ /// the inner type's `Deref::Target` type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut x: Option<String> = Some("hey".to_owned());
+ /// assert_eq!(x.as_deref_mut().map(|x| {
+ /// x.make_ascii_uppercase();
+ /// x
+ /// }), Some("HEY".to_owned().as_mut_str()));
+ /// ```
+ #[stable(feature = "option_deref", since = "1.40.0")]
+ pub fn as_deref_mut(&mut self) -> Option<&mut T::Target> {
+ // Borrow mutably first, then run the contained value through `DerefMut`.
+ match self.as_mut() {
+ Some(t) => Some(t.deref_mut()),
+ None => None,
+ }
+ }
+}
+
+impl<T, E> Option<Result<T, E>> {
+ /// Transposes an `Option` of a [`Result`] into a [`Result`] of an `Option`.
+ ///
+ /// [`None`] will be mapped to [`Ok`]`(`[`None`]`)`.
+ /// [`Some`]`(`[`Ok`]`(_))` and [`Some`]`(`[`Err`]`(_))` will be mapped to
+ /// [`Ok`]`(`[`Some`]`(_))` and [`Err`]`(_)`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #[derive(Debug, Eq, PartialEq)]
+ /// struct SomeErr;
+ ///
+ /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
+ /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
+ /// assert_eq!(x, y.transpose());
+ /// ```
+ #[inline]
+ #[stable(feature = "transpose_result", since = "1.33.0")]
+ pub fn transpose(self) -> Result<Option<T>, E> {
+ // Three cases total: empty, success, failure.
+ match self {
+ None => Ok(None),
+ Some(Ok(x)) => Ok(Some(x)),
+ Some(Err(e)) => Err(e),
+ }
+ }
+}
+
+// This is a separate function to reduce the code size of .expect() itself.
+// `#[cold]` and `#[inline(never)]` keep this panic path out of the hot caller;
+// `#[track_caller]` attributes the panic to `expect`'s call site.
+#[inline(never)]
+#[cold]
+#[track_caller]
+fn expect_failed(msg: &str) -> ! {
+ panic!("{}", msg)
+}
+
+// This is a separate function to reduce the code size of .expect_none() itself.
+// Takes the offending value as `&dyn fmt::Debug` so a single monomorphization
+// serves every `T: fmt::Debug`.
+#[inline(never)]
+#[cold]
+#[track_caller]
+fn expect_none_failed(msg: &str, value: &dyn fmt::Debug) -> ! {
+ panic!("{}: {:?}", msg, value)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Trait implementations
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for Option<T> {
+ /// Clones the contained value, if any.
+ #[inline]
+ fn clone(&self) -> Self {
+ // Borrow the contents, then clone them out via `Option::<&T>::cloned`.
+ self.as_ref().cloned()
+ }
+
+ /// Clones `source` into `self`, reusing `self`'s existing contained value
+ /// (via `T::clone_from`) when both sides are `Some`.
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Some(dest), Some(src)) => dest.clone_from(src),
+ (dest, src) => *dest = src.clone(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for Option<T> {
+ /// Returns [`None`][Option::None].
+ ///
+ /// Note that `T` itself need not implement `Default` for this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let opt: Option<u32> = Option::default();
+ /// assert!(opt.is_none());
+ /// ```
+ #[inline]
+ fn default() -> Option<T> {
+ None
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for Option<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Returns a consuming iterator over the possibly contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("string");
+ /// let v: Vec<&str> = x.into_iter().collect();
+ /// assert_eq!(v, ["string"]);
+ ///
+ /// let x = None;
+ /// let v: Vec<&str> = x.into_iter().collect();
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ // The option itself becomes the state of the private `Item` iterator.
+ IntoIter { inner: Item { opt: self } }
+ }
+}
+
+#[stable(since = "1.4.0", feature = "option_iter")]
+impl<'a, T> IntoIterator for &'a Option<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ /// Returns an iterator over the possibly contained value, by reference.
+ /// Equivalent to calling [`Option::iter`].
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(since = "1.4.0", feature = "option_iter")]
+impl<'a, T> IntoIterator for &'a mut Option<T> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ /// Returns an iterator over the possibly contained value, by mutable
+ /// reference. Equivalent to calling [`Option::iter_mut`].
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(since = "1.12.0", feature = "option_from")]
+impl<T> From<T> for Option<T> {
+ /// Moves `val` into a new [`Some`].
+ ///
+ /// (There is no `Copy` bound on `T`, so the value is moved, not copied.)
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let o: Option<u8> = Option::from(67);
+ ///
+ /// assert_eq!(Some(67), o);
+ /// ```
+ fn from(val: T) -> Option<T> {
+ Some(val)
+ }
+}
+
+#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
+impl<'a, T> From<&'a Option<T>> for Option<&'a T> {
+ /// Converts from `&Option<T>` to `Option<&T>`.
+ ///
+ /// # Examples
+ ///
+ /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original.
+ /// The [`map`] method takes the `self` argument by value, consuming the original,
+ /// so this technique uses `as_ref` to first take an `Option` to a reference
+ /// to the value inside the original.
+ ///
+ /// [`map`]: Option::map
+ /// [`String`]: ../../std/string/struct.String.html
+ ///
+ /// ```
+ /// let s: Option<String> = Some(String::from("Hello, Rustaceans!"));
+ /// let o: Option<usize> = Option::from(&s).map(|ss: &String| ss.len());
+ ///
+ /// println!("Can still print s: {:?}", s);
+ ///
+ /// assert_eq!(o, Some(18));
+ /// ```
+ fn from(o: &'a Option<T>) -> Option<&'a T> {
+ // Same conversion as `Option::as_ref`, exposed through `From`.
+ o.as_ref()
+ }
+}
+
+#[stable(feature = "option_ref_from_ref_option", since = "1.30.0")]
+impl<'a, T> From<&'a mut Option<T>> for Option<&'a mut T> {
+ /// Converts from `&mut Option<T>` to `Option<&mut T>`
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = Some(String::from("Hello"));
+ /// let o: Option<&mut String> = Option::from(&mut s);
+ ///
+ /// match o {
+ /// Some(t) => *t = String::from("Hello, Rustaceans!"),
+ /// None => (),
+ /// }
+ ///
+ /// assert_eq!(s, Some(String::from("Hello, Rustaceans!")));
+ /// ```
+ fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
+ // Same conversion as `Option::as_mut`, exposed through `From`.
+ o.as_mut()
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// The Option Iterators
+/////////////////////////////////////////////////////////////////////////////
+
+// Private adapter that yields the contents of an `Option` at most once.
+// All three public option iterators (`Iter`, `IterMut`, `IntoIter`) wrap an
+// `Item` and forward every iterator method to it.
+#[derive(Clone, Debug)]
+struct Item<A> {
+ opt: Option<A>,
+}
+
+impl<A> Iterator for Item<A> {
+ type Item = A;
+
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ // Yield the stored value (if any) and leave `None` behind, so the
+ // iterator is permanently exhausted after the first `Some`.
+ self.opt.take()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Exactly one element remains while `opt` is `Some`, zero otherwise;
+ // the bound is always exact.
+ let remaining = if self.opt.is_some() { 1 } else { 0 };
+ (remaining, Some(remaining))
+ }
+}
+
+impl<A> DoubleEndedIterator for Item<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ // With at most one element, iterating from the back is identical to
+ // iterating from the front.
+ self.opt.take()
+ }
+}
+
+// `next` returns `None` forever once the value is taken (`FusedIterator`),
+// and `size_hint` is always exact (`ExactSizeIterator`).
+impl<A> ExactSizeIterator for Item<A> {}
+impl<A> FusedIterator for Item<A> {}
+// SAFETY: `size_hint` reports the exact number of remaining elements
+// ((1, Some(1)) or (0, Some(0))), which is what `TrustedLen` requires.
+unsafe impl<A> TrustedLen for Item<A> {}
+
+/// An iterator over a reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::iter`] function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct Iter<'a, A: 'a> {
+ // Single-item adapter holding the value by shared reference; all
+ // iterator methods forward to it.
+ inner: Item<&'a A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> Iterator for Iter<'a, A> {
+ type Item = &'a A;
+
+ // Both methods simply forward to the inner single-item adapter.
+ #[inline]
+ fn next(&mut self) -> Option<&'a A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> DoubleEndedIterator for Iter<'a, A> {
+ // Forwards to the inner adapter, for which front and back are the same.
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for Iter<'_, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for Iter<'_, A> {}
+
+// SAFETY: forwards to `Item`, whose `size_hint` is always exact.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for Iter<'_, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> Clone for Iter<'_, A> {
+ #[inline]
+ fn clone(&self) -> Self {
+ // Written by hand rather than derived: `#[derive(Clone)]` would add an
+ // `A: Clone` bound, but the inner item is a shared reference, which is
+ // always cloneable regardless of `A`.
+ let inner = self.inner.clone();
+ Iter { inner }
+ }
+}
+
+/// An iterator over a mutable reference to the [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::iter_mut`] function.
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct IterMut<'a, A: 'a> {
+ // Single-item adapter holding the value by mutable reference; all
+ // iterator methods forward to it.
+ inner: Item<&'a mut A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> Iterator for IterMut<'a, A> {
+ type Item = &'a mut A;
+
+ // Both methods simply forward to the inner single-item adapter.
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, A> DoubleEndedIterator for IterMut<'a, A> {
+ // Forwards to the inner adapter, for which front and back are the same.
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for IterMut<'_, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for IterMut<'_, A> {}
+// SAFETY: forwards to `Item`, whose `size_hint` is always exact.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IterMut<'_, A> {}
+
+/// An iterator over the value in [`Some`] variant of an [`Option`].
+///
+/// The iterator yields one value if the [`Option`] is a [`Some`], otherwise none.
+///
+/// This `struct` is created by the [`Option::into_iter`] function.
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<A> {
+ // Single-item adapter owning the value; all iterator methods forward to it.
+ inner: Item<A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> Iterator for IntoIter<A> {
+ type Item = A;
+
+ // Both methods simply forward to the inner single-item adapter.
+ #[inline]
+ fn next(&mut self) -> Option<A> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> DoubleEndedIterator for IntoIter<A> {
+ // Forwards to the inner adapter, for which front and back are the same.
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A> ExactSizeIterator for IntoIter<A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<A> FusedIterator for IntoIter<A> {}
+
+// SAFETY: forwards to `Item`, whose `size_hint` is always exact.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IntoIter<A> {}
+
+/////////////////////////////////////////////////////////////////////////////
+// FromIterator
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
+ /// Takes each element in the [`Iterator`]: if it is [`None`][Option::None],
+ /// no further elements are taken, and the [`None`][Option::None] is
+ /// returned. Should no [`None`][Option::None] occur, a container with the
+ /// values of each [`Option`] is returned.
+ ///
+ /// # Examples
+ ///
+ /// Here is an example which increments every integer in a vector.
+ /// We use the checked variant of `add` that returns `None` when the
+ /// calculation would result in an overflow.
+ ///
+ /// ```
+ /// let items = vec![0_u16, 1, 2];
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| x.checked_add(1))
+ /// .collect();
+ ///
+ /// assert_eq!(res, Some(vec![1, 2, 3]));
+ /// ```
+ ///
+ /// As you can see, this will return the expected, valid items.
+ ///
+ /// Here is another example that tries to subtract one from another list
+ /// of integers, this time checking for underflow:
+ ///
+ /// ```
+ /// let items = vec![2_u16, 1, 0];
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| x.checked_sub(1))
+ /// .collect();
+ ///
+ /// assert_eq!(res, None);
+ /// ```
+ ///
+ /// Since the last element is zero, it would underflow. Thus, the resulting
+ /// value is `None`.
+ ///
+ /// Here is a variation on the previous example, showing that no
+ /// further elements are taken from `iter` after the first `None`.
+ ///
+ /// ```
+ /// let items = vec![3_u16, 2, 1, 10];
+ ///
+ /// let mut shared = 0;
+ ///
+ /// let res: Option<Vec<u16>> = items
+ /// .iter()
+ /// .map(|x| { shared += x; x.checked_sub(2) })
+ /// .collect();
+ ///
+ /// assert_eq!(res, None);
+ /// assert_eq!(shared, 6);
+ /// ```
+ ///
+ /// Since the third element caused an underflow, no further elements were taken,
+ /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = Option<A>>>(iter: I) -> Option<V> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ // Map each `Option<A>` to a `Result<A, ()>` so that the short-circuiting
+ // `FromIterator` impl for `Result` does the collection, then convert the
+ // final `Result` back into an `Option` with `.ok()`.
+ iter.into_iter().map(|x| x.ok_or(())).collect::<Result<_, _>>().ok()
+ }
+}
+
+/// The error type that results from applying the try operator (`?`) to a `None` value. If you wish
+/// to allow `x?` (where `x` is an `Option<T>`) to be converted into your error type, you can
+/// implement `impl From<NoneError>` for `YourErrorType`. In that case, `x?` within a function that
+/// returns `Result<_, YourErrorType>` will translate a `None` value into an `Err` result.
+///
+/// This is a zero-sized marker type: the absence of a value carries no
+/// further information.
+#[rustc_diagnostic_item = "none_error"]
+#[unstable(feature = "try_trait", issue = "42327")]
+#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
+pub struct NoneError;
+
+// Powers the `?` operator on `Option<T>`: `Some(v)` unwraps to `v`,
+// `None` early-returns as `None` (via `NoneError`).
+#[unstable(feature = "try_trait", issue = "42327")]
+impl<T> ops::Try for Option<T> {
+ type Ok = T;
+ type Error = NoneError;
+
+ // `Some(v)` -> `Ok(v)`, `None` -> `Err(NoneError)`.
+ #[inline]
+ fn into_result(self) -> Result<T, NoneError> {
+ self.ok_or(NoneError)
+ }
+
+ #[inline]
+ fn from_ok(v: T) -> Self {
+ Some(v)
+ }
+
+ #[inline]
+ fn from_error(_: NoneError) -> Self {
+ None
+ }
+}
+
+impl<T> Option<Option<T>> {
+ /// Converts from `Option<Option<T>>` to `Option<T>`
+ ///
+ /// # Examples
+ /// Basic usage:
+ /// ```
+ /// let x: Option<Option<u32>> = Some(Some(6));
+ /// assert_eq!(Some(6), x.flatten());
+ ///
+ /// let x: Option<Option<u32>> = Some(None);
+ /// assert_eq!(None, x.flatten());
+ ///
+ /// let x: Option<Option<u32>> = None;
+ /// assert_eq!(None, x.flatten());
+ /// ```
+ /// Flattening once only removes one level of nesting:
+ /// ```
+ /// let x: Option<Option<Option<u32>>> = Some(Some(Some(6)));
+ /// assert_eq!(Some(Some(6)), x.flatten());
+ /// assert_eq!(Some(6), x.flatten().flatten());
+ /// ```
+ #[inline]
+ #[stable(feature = "option_flattening", since = "1.40.0")]
+ pub fn flatten(self) -> Option<T> {
+ // Peel off exactly one layer of `Option`: the inner option of a
+ // `Some` is returned unchanged, and `None` stays `None`.
+ match self {
+ Some(inner) => inner,
+ None => None,
+ }
+ }
+}
--- /dev/null
+//! Panic support in the standard library.
+
+#![stable(feature = "core_panic_info", since = "1.41.0")]
+
+use crate::any::Any;
+use crate::fmt;
+
+/// A struct providing information about a panic.
+///
+/// `PanicInfo` structure is passed to a panic hook set by the [`set_hook`]
+/// function.
+///
+/// [`set_hook`]: ../../std/panic/fn.set_hook.html
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
+/// println!("panic occurred: {:?}", s);
+/// } else {
+/// println!("panic occurred");
+/// }
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
+#[lang = "panic_info"]
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+#[derive(Debug)]
+pub struct PanicInfo<'a> {
+ // The panic payload; a dummy value until `set_payload` is called.
+ payload: &'a (dyn Any + Send),
+ // Formatted message, present when the panic came with format arguments.
+ message: Option<&'a fmt::Arguments<'a>>,
+ // Source location of the panic; always present (see `location()`).
+ location: &'a Location<'a>,
+}
+
+impl<'a> PanicInfo<'a> {
+ #[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ #[inline]
+ pub fn internal_constructor(
+ message: Option<&'a fmt::Arguments<'a>>,
+ location: &'a Location<'a>,
+ ) -> Self {
+ // A zero-sized placeholder payload; the real payload (if any) is
+ // installed later through `set_payload`. libcore cannot allocate a
+ // `Box<dyn Any>`, so a dummy is used instead.
+ struct NoPayload;
+ PanicInfo { location, message, payload: &NoPayload }
+ }
+
+ #[unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ #[doc(hidden)]
+ #[inline]
+ pub fn set_payload(&mut self, info: &'a (dyn Any + Send)) {
+ // Replaces the `NoPayload` dummy set by `internal_constructor`.
+ self.payload = info;
+ }
+
+ /// Returns the payload associated with the panic.
+ ///
+ /// This will commonly, but not always, be a `&'static str` or [`String`].
+ ///
+ /// [`String`]: ../../std/string/struct.String.html
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(s) = panic_info.payload().downcast_ref::<&str>() {
+ /// println!("panic occurred: {:?}", s);
+ /// } else {
+ /// println!("panic occurred");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn payload(&self) -> &(dyn Any + Send) {
+ self.payload
+ }
+
+ /// If the `panic!` macro from the `core` crate (not from `std`)
+ /// was used with a formatting string and some additional arguments,
+ /// returns that message ready to be used for example with [`fmt::write`]
+ #[unstable(feature = "panic_info_message", issue = "66745")]
+ pub fn message(&self) -> Option<&fmt::Arguments<'_>> {
+ self.message
+ }
+
+ /// Returns information about the location from which the panic originated,
+ /// if available.
+ ///
+ /// This method will currently always return [`Some`], but this may change
+ /// in future versions.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}' at line {}",
+ /// location.file(),
+ /// location.line(),
+ /// );
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn location(&self) -> Option<&Location<'_>> {
+ // NOTE: If this is changed to sometimes return None,
+ // deal with that case in std::panicking::default_hook and std::panicking::begin_panic_fmt.
+ Some(&self.location)
+ }
+}
+
+#[stable(feature = "panic_hook_display", since = "1.26.0")]
+impl fmt::Display for PanicInfo<'_> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("panicked at ")?;
+ // Prefer the formatted message; fall back to a `&'static str` payload
+ // when there is no message.
+ if let Some(message) = self.message {
+ write!(formatter, "'{}', ", message)?
+ } else if let Some(payload) = self.payload.downcast_ref::<&'static str>() {
+ write!(formatter, "'{}', ", payload)?
+ }
+ // NOTE: we cannot use downcast_ref::<String>() here
+ // since String is not available in libcore!
+ // The payload is a String when `std::panic!` is called with multiple arguments,
+ // but in that case the message is also available.
+
+ self.location.fmt(formatter)
+ }
+}
+
+/// A struct containing information about the location of a panic.
+///
+/// This structure is created by [`PanicInfo::location()`].
+///
+/// # Examples
+///
+/// ```should_panic
+/// use std::panic;
+///
+/// panic::set_hook(Box::new(|panic_info| {
+/// if let Some(location) = panic_info.location() {
+/// println!("panic occurred in file '{}' at line {}", location.file(), location.line());
+/// } else {
+/// println!("panic occurred but can't get location information...");
+/// }
+/// }));
+///
+/// panic!("Normal panic");
+/// ```
+///
+/// # Comparisons
+///
+/// Comparisons for equality and ordering are made in file, line, then column priority.
+/// Files are compared as strings, not `Path`, which could be unexpected.
+/// See [`Location::file`]'s documentation for more discussion.
+#[lang = "panic_location"]
+#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[stable(feature = "panic_hooks", since = "1.10.0")]
+pub struct Location<'a> {
+ // Field order matters: the derived `Ord`/`PartialOrd` compare fields in
+ // declaration order (file, then line, then column), as documented above.
+ file: &'a str,
+ line: u32,
+ col: u32,
+}
+
+impl<'a> Location<'a> {
+ /// Returns the source location of the caller of this function. If that function's caller is
+ /// annotated then its call location will be returned, and so on up the stack to the first call
+ /// within a non-tracked function body.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::panic::Location;
+ ///
+ /// /// Returns the [`Location`] at which it is called.
+ /// #[track_caller]
+ /// fn get_caller_location() -> &'static Location<'static> {
+ /// Location::caller()
+ /// }
+ ///
+ /// /// Returns a [`Location`] from within this function's definition.
+ /// fn get_just_one_location() -> &'static Location<'static> {
+ /// get_caller_location()
+ /// }
+ ///
+ /// let fixed_location = get_just_one_location();
+ /// assert_eq!(fixed_location.file(), file!());
+ /// assert_eq!(fixed_location.line(), 14);
+ /// assert_eq!(fixed_location.column(), 5);
+ ///
+ /// // running the same untracked function in a different location gives us the same result
+ /// let second_fixed_location = get_just_one_location();
+ /// assert_eq!(fixed_location.file(), second_fixed_location.file());
+ /// assert_eq!(fixed_location.line(), second_fixed_location.line());
+ /// assert_eq!(fixed_location.column(), second_fixed_location.column());
+ ///
+ /// let this_location = get_caller_location();
+ /// assert_eq!(this_location.file(), file!());
+ /// assert_eq!(this_location.line(), 28);
+ /// assert_eq!(this_location.column(), 21);
+ ///
+ /// // running the tracked function in a different location produces a different value
+ /// let another_location = get_caller_location();
+ /// assert_eq!(this_location.file(), another_location.file());
+ /// assert_ne!(this_location.line(), another_location.line());
+ /// assert_ne!(this_location.column(), another_location.column());
+ /// ```
+ #[stable(feature = "track_caller", since = "1.46.0")]
+ #[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
+ #[track_caller]
+ pub const fn caller() -> &'static Location<'static> {
+ // `#[track_caller]` makes the intrinsic report the *caller's* location
+ // rather than this function's own.
+ crate::intrinsics::caller_location()
+ }
+}
+
+impl<'a> Location<'a> {
+ #![unstable(
+ feature = "panic_internals",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+ )]
+ // Internal constructor used by panic machinery; not public API.
+ #[doc(hidden)]
+ pub const fn internal_constructor(file: &'a str, line: u32, col: u32) -> Self {
+ Location { file, line, col }
+ }
+
+ /// Returns the name of the source file from which the panic originated.
+ ///
+ /// # `&str`, not `&Path`
+ ///
+ /// The returned name refers to a source path on the compiling system, but it isn't valid to
+ /// represent this directly as a `&Path`. The compiled code may run on a different system with
+ /// a different `Path` implementation than the system providing the contents and this library
+ /// does not currently have a different "host path" type.
+ ///
+ /// The most surprising behavior occurs when "the same" file is reachable via multiple paths in
+ /// the module system (usually using the `#[path = "..."]` attribute or similar), which can
+ /// cause what appears to be identical code to return differing values from this function.
+ ///
+ /// # Cross-compilation
+ ///
+ /// This value is not suitable for passing to `Path::new` or similar constructors when the host
+ /// platform and target platform differ.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred in file '{}'", location.file());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn file(&self) -> &str {
+ self.file
+ }
+
+ /// Returns the line number from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred at line {}", location.line());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[stable(feature = "panic_hooks", since = "1.10.0")]
+ pub fn line(&self) -> u32 {
+ self.line
+ }
+
+ /// Returns the column from which the panic originated.
+ ///
+ /// # Examples
+ ///
+ /// ```should_panic
+ /// use std::panic;
+ ///
+ /// panic::set_hook(Box::new(|panic_info| {
+ /// if let Some(location) = panic_info.location() {
+ /// println!("panic occurred at column {}", location.column());
+ /// } else {
+ /// println!("panic occurred but can't get location information...");
+ /// }
+ /// }));
+ ///
+ /// panic!("Normal panic");
+ /// ```
+ #[stable(feature = "panic_col", since = "1.25.0")]
+ pub fn column(&self) -> u32 {
+ self.col
+ }
+}
+
+#[stable(feature = "panic_hook_display", since = "1.26.0")]
+impl fmt::Display for Location<'_> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Render as `file:line:column`, the conventional source-location
+ // format used in panic messages.
+ let Location { file, line, col } = self;
+ write!(formatter, "{}:{}:{}", file, line, col)
+ }
+}
+
+/// An internal trait used by libstd to pass data from libstd to `panic_unwind`
+/// and other panic runtimes. Not intended to be stabilized any time soon, do
+/// not use.
+#[unstable(feature = "std_internals", issue = "none")]
+#[doc(hidden)]
+pub unsafe trait BoxMeUp {
+ /// Take full ownership of the contents.
+ /// The return type is actually `Box<dyn Any + Send>`, but we cannot use `Box` in libcore.
+ ///
+ /// After this method got called, only some dummy default value is left in `self`.
+ /// Calling this method twice, or calling `get` after calling this method, is an error.
+ ///
+ /// The argument is borrowed because the panic runtime (`__rust_start_panic`) only
+ /// gets a borrowed `dyn BoxMeUp`.
+ fn take_box(&mut self) -> *mut (dyn Any + Send);
+
+ /// Just borrow the contents.
+ fn get(&mut self) -> &(dyn Any + Send);
+}
--- /dev/null
+//! Panic support for libcore
+//!
+//! The core library cannot define panicking, but it does *declare* panicking. This
+//! means that the functions inside of libcore are allowed to panic, but to be
+//! useful an upstream crate must define panicking for libcore to use. The current
+//! interface for panicking is:
+//!
+//! ```
+//! fn panic_impl(pi: &core::panic::PanicInfo<'_>) -> !
+//! # { loop {} }
+//! ```
+//!
+//! This definition allows for panicking with any general message, but it does not
+//! allow for failing with a `Box<Any>` value. (`PanicInfo` just contains a `&(dyn Any + Send)`,
+//! for which we fill in a dummy value in `PanicInfo::internal_constructor`.)
+//! The reason for this is that libcore is not allowed to allocate.
+//!
+//! This module contains a few other panicking functions, but these are just the
+//! necessary lang items for the compiler. All panics are funneled through this
+//! one function. The actual symbol is declared through the `#[panic_handler]` attribute.
+
+#![allow(dead_code, missing_docs)]
+#![unstable(
+ feature = "core_panic",
+ reason = "internal details of the implementation of the `panic!` and related macros",
+ issue = "none"
+)]
+
+use crate::fmt;
+use crate::panic::{Location, PanicInfo};
+
+/// The underlying implementation of libcore's `panic!` macro when no formatting is used.
+#[cold]
+// never inline unless panic_immediate_abort to avoid code
+// bloat at the call sites as much as possible
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[track_caller]
+#[lang = "panic"] // needed by codegen for panic on overflow and other `Assert` MIR terminators
+pub fn panic(expr: &'static str) -> ! {
+ // With `panic_immediate_abort` there is no unwinding or message
+ // formatting at all; the process just aborts.
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
+ // reduce size overhead. The format_args! macro uses str's Display trait to
+ // write expr, which calls Formatter::pad, which must accommodate string
+ // truncation and padding (even though none is used here). Using
+ // Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
+ // output binary, saving up to a few kilobytes.
+ panic_fmt(fmt::Arguments::new_v1(&[expr], &[]));
+}
+
+/// Panics with the given `&str` message, routed through `panic_fmt`.
+#[inline]
+#[track_caller]
+#[cfg_attr(not(bootstrap), lang = "panic_str")] // needed for const-evaluated panics
+pub fn panic_str(expr: &str) -> ! {
+ panic_fmt(format_args!("{}", expr));
+}
+
+// Panic message for out-of-bounds indexing; invoked directly by codegen,
+// which supplies the failing index and the slice length.
+#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[track_caller]
+#[lang = "panic_bounds_check"] // needed by codegen for panic on OOB array/slice access
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ // With `panic_immediate_abort` skip message formatting and abort directly.
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ panic!("index out of bounds: the len is {} but the index is {}", len, index)
+}
+
+/// The underlying implementation of libcore's `panic!` macro when formatting is used.
+///
+/// All other panic entry points in this module funnel into this function,
+/// which builds a `PanicInfo` and hands it to the `#[panic_handler]`.
+#[cold]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
+#[track_caller]
+pub fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
+ if cfg!(feature = "panic_immediate_abort") {
+ super::intrinsics::abort()
+ }
+
+ // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
+ // that gets resolved to the `#[panic_handler]` function.
+ extern "Rust" {
+ #[lang = "panic_impl"]
+ fn panic_impl(pi: &PanicInfo<'_>) -> !;
+ }
+
+ // `Location::caller()` here reports our caller thanks to `#[track_caller]`.
+ let pi = PanicInfo::internal_constructor(Some(&fmt), Location::caller());
+
+ // SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
+ unsafe { panic_impl(&pi) }
+}
--- /dev/null
+//! Types that pin data to its location in memory.
+//!
+//! It is sometimes useful to have objects that are guaranteed not to move,
+//! in the sense that their placement in memory does not change, and can thus be relied upon.
+//! A prime example of such a scenario would be building self-referential structs,
+//! as moving an object with pointers to itself will invalidate them, which could cause undefined
+//! behavior.
+//!
+//! At a high level, a [`Pin<P>`] ensures that the pointee of any pointer type
+//! `P` has a stable location in memory, meaning it cannot be moved elsewhere
+//! and its memory cannot be deallocated until it gets dropped. We say that the
+//! pointee is "pinned". Things get more subtle when discussing types that
+//! combine pinned with non-pinned data; [see below](#projections-and-structural-pinning)
+//! for more details.
+//!
+//! By default, all types in Rust are movable. Rust allows passing all types by-value,
+//! and common smart-pointer types such as [`Box<T>`] and `&mut T` allow replacing and
+//! moving the values they contain: you can move out of a [`Box<T>`], or you can use [`mem::swap`].
+//! [`Pin<P>`] wraps a pointer type `P`, so [`Pin`]`<`[`Box`]`<T>>` functions much like a regular
+//! [`Box<T>`]: when a [`Pin`]`<`[`Box`]`<T>>` gets dropped, so do its contents, and the memory gets
+//! deallocated. Similarly, [`Pin`]`<&mut T>` is a lot like `&mut T`. However, [`Pin<P>`] does
+//! not let clients actually obtain a [`Box<T>`] or `&mut T` to pinned data, which implies that you
+//! cannot use operations such as [`mem::swap`]:
+//!
+//! ```
+//! use std::pin::Pin;
+//! fn swap_pins<T>(x: Pin<&mut T>, y: Pin<&mut T>) {
+//! // `mem::swap` needs `&mut T`, but we cannot get it.
+//! // We are stuck, we cannot swap the contents of these references.
+//! // We could use `Pin::get_unchecked_mut`, but that is unsafe for a reason:
+//! // we are not allowed to use it for moving things out of the `Pin`.
+//! }
+//! ```
+//!
+//! It is worth reiterating that [`Pin<P>`] does *not* change the fact that a Rust compiler
+//! considers all types movable. [`mem::swap`] remains callable for any `T`. Instead, [`Pin<P>`]
+//! prevents certain *values* (pointed to by pointers wrapped in [`Pin<P>`]) from being
+//! moved by making it impossible to call methods that require `&mut T` on them
+//! (like [`mem::swap`]).
+//!
+//! [`Pin<P>`] can be used to wrap any pointer type `P`, and as such it interacts with
+//! [`Deref`] and [`DerefMut`]. A [`Pin<P>`] where `P: Deref` should be considered
+//! as a "`P`-style pointer" to a pinned `P::Target` -- so, a [`Pin`]`<`[`Box`]`<T>>` is
+//! an owned pointer to a pinned `T`, and a [`Pin`]`<`[`Rc`]`<T>>` is a reference-counted
+//! pointer to a pinned `T`.
+//! For correctness, [`Pin<P>`] relies on the implementations of [`Deref`] and
+//! [`DerefMut`] not to move out of their `self` parameter, and only ever to
+//! return a pointer to pinned data when they are called on a pinned pointer.
+//!
+//! # `Unpin`
+//!
+//! Many types are always freely movable, even when pinned, because they do not
+//! rely on having a stable address. This includes all the basic types (like
+//! [`bool`], [`i32`], and references) as well as types consisting solely of these
+//! types. Types that do not care about pinning implement the [`Unpin`]
+//! auto-trait, which cancels the effect of [`Pin<P>`]. For `T: Unpin`,
+//! [`Pin`]`<`[`Box`]`<T>>` and [`Box<T>`] function identically, as do [`Pin`]`<&mut T>` and
+//! `&mut T`.
+//!
+//! Note that pinning and [`Unpin`] only affect the pointed-to type `P::Target`, not the pointer
+//! type `P` itself that got wrapped in [`Pin<P>`]. For example, whether or not [`Box<T>`] is
+//! [`Unpin`] has no effect on the behavior of [`Pin`]`<`[`Box`]`<T>>` (here, `T` is the
+//! pointed-to type).
+//!
+//! # Example: self-referential struct
+//!
+//! Before we go into more details to explain the guarantees and choices
+//! associated with `Pin<P>`, we discuss some examples for how it might be used.
+//! Feel free to [skip to where the theoretical discussion continues](#drop-guarantee).
+//!
+//! ```rust
+//! use std::pin::Pin;
+//! use std::marker::PhantomPinned;
+//! use std::ptr::NonNull;
+//!
+//! // This is a self-referential struct because the slice field points to the data field.
+//! // We cannot inform the compiler about that with a normal reference,
+//! // as this pattern cannot be described with the usual borrowing rules.
+//! // Instead we use a raw pointer, though one which is known not to be null,
+//! // as we know it's pointing at the string.
+//! struct Unmovable {
+//! data: String,
+//! slice: NonNull<String>,
+//! _pin: PhantomPinned,
+//! }
+//!
+//! impl Unmovable {
+//! // To ensure the data doesn't move when the function returns,
+//! // we place it in the heap where it will stay for the lifetime of the object,
+//! // and the only way to access it would be through a pointer to it.
+//! fn new(data: String) -> Pin<Box<Self>> {
+//! let res = Unmovable {
+//! data,
+//! // we only create the pointer once the data is in place
+//! // otherwise it will have already moved before we even started
+//! slice: NonNull::dangling(),
+//! _pin: PhantomPinned,
+//! };
+//! let mut boxed = Box::pin(res);
+//!
+//! let slice = NonNull::from(&boxed.data);
+//! // we know this is safe because modifying a field doesn't move the whole struct
+//! unsafe {
+//! let mut_ref: Pin<&mut Self> = Pin::as_mut(&mut boxed);
+//! Pin::get_unchecked_mut(mut_ref).slice = slice;
+//! }
+//! boxed
+//! }
+//! }
+//!
+//! let unmoved = Unmovable::new("hello".to_string());
+//! // The pointer should point to the correct location,
+//! // so long as the struct hasn't moved.
+//! // Meanwhile, we are free to move the pointer around.
+//! # #[allow(unused_mut)]
+//! let mut still_unmoved = unmoved;
+//! assert_eq!(still_unmoved.slice, NonNull::from(&still_unmoved.data));
+//!
+//! // Since our type doesn't implement Unpin, this will fail to compile:
+//! // let mut new_unmoved = Unmovable::new("world".to_string());
+//! // std::mem::swap(&mut *still_unmoved, &mut *new_unmoved);
+//! ```
+//!
+//! # Example: intrusive doubly-linked list
+//!
+//! In an intrusive doubly-linked list, the collection does not actually allocate
+//! the memory for the elements itself. Allocation is controlled by the clients,
+//! and elements can live on a stack frame that lives shorter than the collection does.
+//!
+//! To make this work, every element has pointers to its predecessor and successor in
+//! the list. Elements can only be added when they are pinned, because moving the elements
+//! around would invalidate the pointers. Moreover, the [`Drop`] implementation of a linked
+//! list element will patch the pointers of its predecessor and successor to remove itself
+//! from the list.
+//!
+//! Crucially, we have to be able to rely on [`drop`] being called. If an element
+//! could be deallocated or otherwise invalidated without calling [`drop`], the pointers into it
+//! from its neighboring elements would become invalid, which would break the data structure.
+//!
+//! Therefore, pinning also comes with a [`drop`]-related guarantee.
+//!
+//! # `Drop` guarantee
+//!
+//! The purpose of pinning is to be able to rely on the placement of some data in memory.
+//! To make this work, not just moving the data is restricted; deallocating, repurposing, or
+//! otherwise invalidating the memory used to store the data is restricted, too.
+//! Concretely, for pinned data you have to maintain the invariant
+//! that *its memory will not get invalidated or repurposed from the moment it gets pinned until
+//! when [`drop`] is called*. Only once [`drop`] returns or panics, the memory may be reused.
+//!
+//! Memory can be "invalidated" by deallocation, but also by
+//! replacing a [`Some(v)`] by [`None`], or calling [`Vec::set_len`] to "kill" some elements
+//! off of a vector. It can be repurposed by using [`ptr::write`] to overwrite it without
+//! calling the destructor first. None of this is allowed for pinned data without calling [`drop`].
+//!
+//! This is exactly the kind of guarantee that the intrusive linked list from the previous
+//! section needs to function correctly.
+//!
+//! Notice that this guarantee does *not* mean that memory does not leak! It is still
+//! completely okay to never call [`drop`] on a pinned element (e.g., you can still
+//! call [`mem::forget`] on a [`Pin`]`<`[`Box`]`<T>>`). In the example of the doubly-linked
+//! list, that element would just stay in the list. However you may not free or reuse the storage
+//! *without calling [`drop`]*.
+//!
+//! # `Drop` implementation
+//!
+//! If your type uses pinning (such as the two examples above), you have to be careful
+//! when implementing [`Drop`]. The [`drop`] function takes `&mut self`, but this
+//! is called *even if your type was previously pinned*! It is as if the
+//! compiler automatically called [`Pin::get_unchecked_mut`].
+//!
+//! This can never cause a problem in safe code because implementing a type that
+//! relies on pinning requires unsafe code, but be aware that deciding to make
+//! use of pinning in your type (for example by implementing some operation on
+//! [`Pin`]`<&Self>` or [`Pin`]`<&mut Self>`) has consequences for your [`Drop`]
+//! implementation as well: if an element of your type could have been pinned,
+//! you must treat [`Drop`] as implicitly taking [`Pin`]`<&mut Self>`.
+//!
+//! For example, you could implement `Drop` as follows:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # struct Type { }
+//! impl Drop for Type {
+//! fn drop(&mut self) {
+//! // `new_unchecked` is okay because we know this value is never used
+//! // again after being dropped.
+//! inner_drop(unsafe { Pin::new_unchecked(self) });
+//! fn inner_drop(this: Pin<&mut Type>) {
+//! // Actual drop code goes here.
+//! }
+//! }
+//! }
+//! ```
+//!
+//! The function `inner_drop` has the type that [`drop`] *should* have, so this makes sure that
+//! you do not accidentally use `self`/`this` in a way that is in conflict with pinning.
+//!
+//! Moreover, if your type is `#[repr(packed)]`, the compiler will automatically
+//! move fields around to be able to drop them. It might even do
+//! that for fields that happen to be sufficiently aligned. As a consequence, you cannot use
+//! pinning with a `#[repr(packed)]` type.
+//!
+//! # Projections and Structural Pinning
+//!
+//! When working with pinned structs, the question arises how one can access the
+//! fields of that struct in a method that takes just [`Pin`]`<&mut Struct>`.
+//! The usual approach is to write helper methods (so called *projections*)
+//! that turn [`Pin`]`<&mut Struct>` into a reference to the field, but what
+//! type should that reference have? Is it [`Pin`]`<&mut Field>` or `&mut Field`?
+//! The same question arises with the fields of an `enum`, and also when considering
+//! container/wrapper types such as [`Vec<T>`], [`Box<T>`], or [`RefCell<T>`].
+//! (This question applies to both mutable and shared references; we just
+//! use the more common case of mutable references here for illustration.)
+//!
+//! It turns out that it is actually up to the author of the data structure
+//! to decide whether the pinned projection for a particular field turns
+//! [`Pin`]`<&mut Struct>` into [`Pin`]`<&mut Field>` or `&mut Field`. There are some
+//! constraints though, and the most important constraint is *consistency*:
+//! every field can be *either* projected to a pinned reference, *or* have
+//! pinning removed as part of the projection. If both are done for the same field,
+//! that will likely be unsound!
+//!
+//! As the author of a data structure you get to decide for each field whether pinning
+//! "propagates" to this field or not. Pinning that propagates is also called "structural",
+//! because it follows the structure of the type.
+//! In the following subsections, we describe the considerations that have to be made
+//! for either choice.
+//!
+//! ## Pinning *is not* structural for `field`
+//!
+//! It may seem counter-intuitive that the field of a pinned struct might not be pinned,
+//! but that is actually the easiest choice: if a [`Pin`]`<&mut Field>` is never created,
+//! nothing can go wrong! So, if you decide that some field does not have structural pinning,
+//! all you have to ensure is that you never create a pinned reference to that field.
+//!
+//! Fields without structural pinning may have a projection method that turns
+//! [`Pin`]`<&mut Struct>` into `&mut Field`:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # type Field = i32;
+//! # struct Struct { field: Field }
+//! impl Struct {
+//! fn pin_get_field(self: Pin<&mut Self>) -> &mut Field {
+//! // This is okay because `field` is never considered pinned.
+//! unsafe { &mut self.get_unchecked_mut().field }
+//! }
+//! }
+//! ```
+//!
+//! You may also `impl Unpin for Struct` *even if* the type of `field`
+//! is not [`Unpin`]. What that type thinks about pinning is not relevant
+//! when no [`Pin`]`<&mut Field>` is ever created.
+//!
+//! ## Pinning *is* structural for `field`
+//!
+//! The other option is to decide that pinning is "structural" for `field`,
+//! meaning that if the struct is pinned then so is the field.
+//!
+//! This allows writing a projection that creates a [`Pin`]`<&mut Field>`, thus
+//! witnessing that the field is pinned:
+//!
+//! ```rust,no_run
+//! # use std::pin::Pin;
+//! # type Field = i32;
+//! # struct Struct { field: Field }
+//! impl Struct {
+//! fn pin_get_field(self: Pin<&mut Self>) -> Pin<&mut Field> {
+//! // This is okay because `field` is pinned when `self` is.
+//! unsafe { self.map_unchecked_mut(|s| &mut s.field) }
+//! }
+//! }
+//! ```
+//!
+//! However, structural pinning comes with a few extra requirements:
+//!
+//! 1. The struct must only be [`Unpin`] if all the structural fields are
+//! [`Unpin`]. This is the default, but [`Unpin`] is a safe trait, so as the author of
+//! the struct it is your responsibility *not* to add something like
+//! `impl<T> Unpin for Struct<T>`. (Notice that adding a projection operation
+//! requires unsafe code, so the fact that [`Unpin`] is a safe trait does not break
+//! the principle that you only have to worry about any of this if you use `unsafe`.)
+//! 2. The destructor of the struct must not move structural fields out of its argument. This
+//! is the exact point that was raised in the [previous section][drop-impl]: `drop` takes
+//! `&mut self`, but the struct (and hence its fields) might have been pinned before.
+//! You have to guarantee that you do not move a field inside your [`Drop`] implementation.
+//! In particular, as explained previously, this means that your struct must *not*
+//! be `#[repr(packed)]`.
+//! See that section for how to write [`drop`] in a way that the compiler can help you
+//! not accidentally break pinning.
+//! 3. You must make sure that you uphold the [`Drop` guarantee][drop-guarantee]:
+//! once your struct is pinned, the memory that contains the
+//! content is not overwritten or deallocated without calling the content's destructors.
+//! This can be tricky, as witnessed by [`VecDeque<T>`]: the destructor of [`VecDeque<T>`]
+//! can fail to call [`drop`] on all elements if one of the destructors panics. This violates
+//! the [`Drop`] guarantee, because it can lead to elements being deallocated without
+//! their destructor being called. ([`VecDeque<T>`] has no pinning projections, so this
+//! does not cause unsoundness.)
+//! 4. You must not offer any other operations that could lead to data being moved out of
+//! the structural fields when your type is pinned. For example, if the struct contains an
+//! [`Option<T>`] and there is a `take`-like operation with type
+//! `fn(Pin<&mut Struct<T>>) -> Option<T>`,
+//! that operation can be used to move a `T` out of a pinned `Struct<T>` -- which means
+//! pinning cannot be structural for the field holding this data.
+//!
+//! For a more complex example of moving data out of a pinned type, imagine if [`RefCell<T>`]
+//! had a method `fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T>`.
+//! Then we could do the following:
+//! ```compile_fail
+//! fn exploit_ref_cell<T>(rc: Pin<&mut RefCell<T>>) {
+//! { let p = rc.as_mut().get_pin_mut(); } // Here we get pinned access to the `T`.
+//! let rc_shr: &RefCell<T> = rc.into_ref().get_ref();
+//! let b = rc_shr.borrow_mut();
+//! let content = &mut *b; // And here we have `&mut T` to the same data.
+//! }
+//! ```
+//! This is catastrophic: it means we can first pin the content of the [`RefCell<T>`]
+//! (using `RefCell::get_pin_mut`) and then move that content using the mutable
+//! reference we got later.
+//!
+//! ## Examples
+//!
+//! For a type like [`Vec<T>`], both possibilities (structural pinning or not) make sense.
+//! A [`Vec<T>`] with structural pinning could have `get_pin`/`get_pin_mut` methods to get
+//! pinned references to elements. However, it could *not* allow calling
+//! [`pop`][Vec::pop] on a pinned [`Vec<T>`] because that would move the (structurally pinned)
+//! contents! Nor could it allow [`push`][Vec::push], which might reallocate and thus also move the
+//! contents.
+//!
+//! A [`Vec<T>`] without structural pinning could `impl<T> Unpin for Vec<T>`, because the contents
+//! are never pinned and the [`Vec<T>`] itself is fine with being moved as well.
+//! At that point pinning just has no effect on the vector at all.
+//!
+//! In the standard library, pointer types generally do not have structural pinning,
+//! and thus they do not offer pinning projections. This is why `Box<T>: Unpin` holds for all `T`.
+//! It makes sense to do this for pointer types, because moving the `Box<T>`
+//! does not actually move the `T`: the [`Box<T>`] can be freely movable (aka `Unpin`) even if
+//! the `T` is not. In fact, even [`Pin`]`<`[`Box`]`<T>>` and [`Pin`]`<&mut T>` are always
+//! [`Unpin`] themselves, for the same reason: their contents (the `T`) are pinned, but the
+//! pointers themselves can be moved without moving the pinned data. For both [`Box<T>`] and
+//! [`Pin`]`<`[`Box`]`<T>>`, whether the content is pinned is entirely independent of whether the
+//! pointer is pinned, meaning pinning is *not* structural.
+//!
+//! When implementing a [`Future`] combinator, you will usually need structural pinning
+//! for the nested futures, as you need to get pinned references to them to call [`poll`].
+//! But if your combinator contains any other data that does not need to be pinned,
+//! you can make those fields not structural and hence freely access them with a
+//! mutable reference even when you just have [`Pin`]`<&mut Self>` (such as in your own
+//! [`poll`] implementation).
+//!
+//! [`Pin<P>`]: Pin
+//! [`Deref`]: crate::ops::Deref
+//! [`DerefMut`]: crate::ops::DerefMut
+//! [`mem::swap`]: crate::mem::swap
+//! [`mem::forget`]: crate::mem::forget
+//! [`Box<T>`]: ../../std/boxed/struct.Box.html
+//! [`Vec<T>`]: ../../std/vec/struct.Vec.html
+//! [`Vec::set_len`]: ../../std/vec/struct.Vec.html#method.set_len
+//! [`Box`]: ../../std/boxed/struct.Box.html
+//! [Vec::pop]: ../../std/vec/struct.Vec.html#method.pop
+//! [Vec::push]: ../../std/vec/struct.Vec.html#method.push
+//! [`Rc`]: ../../std/rc/struct.Rc.html
+//! [`RefCell<T>`]: crate::cell::RefCell
+//! [`drop`]: Drop::drop
+//! [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
+//! [`Option<T>`]: Option
+//! [`Some(v)`]: Some
+//! [`ptr::write`]: crate::ptr::write
+//! [`Future`]: crate::future::Future
+//! [drop-impl]: #drop-implementation
+//! [drop-guarantee]: #drop-guarantee
+//! [`poll`]: crate::future::Future::poll
+
+#![stable(feature = "pin", since = "1.33.0")]
+
+use crate::cmp::{self, PartialEq, PartialOrd};
+use crate::fmt;
+use crate::hash::{Hash, Hasher};
+use crate::marker::{Sized, Unpin};
+use crate::ops::{CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Receiver};
+
+/// A pinned pointer.
+///
+/// This is a wrapper around a kind of pointer which makes that pointer "pin" its
+/// value in place, preventing the value referenced by that pointer from being moved
+/// unless it implements [`Unpin`].
+///
+/// *See the [`pin` module] documentation for an explanation of pinning.*
+///
+/// [`pin` module]: self
+//
+// Note: the `Clone` derive below causes unsoundness as it's possible to implement
+// `Clone` for mutable references.
+// See <https://internals.rust-lang.org/t/unsoundness-in-pin/11311> for more details.
+//
+// NOTE(review): the note above claims the `Clone` derive is problematic, yet
+// `#[derive(Copy, Clone)]` is present below — verify against upstream history
+// whether the derive or the comment is the intended state. TODO confirm.
+#[stable(feature = "pin", since = "1.33.0")]
+#[lang = "pin"]
+#[fundamental]
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct Pin<P> {
+    // The wrapped pointer. Deliberately private: if safe code could obtain
+    // `&mut P` (or move the `P` out), it could be used to break the pinning
+    // invariant established by `Pin::new_unchecked`.
+    pointer: P,
+}
+
+// The following implementations aren't derived in order to avoid soundness
+// issues. `&self.pointer` should not be accessible to untrusted trait
+// implementations.
+//
+// See <https://internals.rust-lang.org/t/unsoundness-in-pin/11311/73> for more details.
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref, Q: Deref> PartialEq<Pin<Q>> for Pin<P>
+where
+    P::Target: PartialEq<Q::Target>,
+{
+    fn eq(&self, other: &Pin<Q>) -> bool {
+        // `self` and `other` auto-deref (through `Pin`'s `Deref` impl below) to
+        // `&P::Target` / `&Q::Target`, so the comparison is delegated to the
+        // pointees without handing `&self.pointer` to untrusted trait impls
+        // (see the soundness note above these impls).
+        P::Target::eq(self, other)
+    }
+
+    fn ne(&self, other: &Pin<Q>) -> bool {
+        P::Target::ne(self, other)
+    }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+// `Pin` compares like its pointee, so it is a full equivalence relation
+// whenever the pointee's equality is.
+impl<P: Deref<Target: Eq>> Eq for Pin<P> {}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref, Q: Deref> PartialOrd<Pin<Q>> for Pin<P>
+where
+    P::Target: PartialOrd<Q::Target>,
+{
+    fn partial_cmp(&self, other: &Pin<Q>) -> Option<cmp::Ordering> {
+        // As in the `PartialEq` impl: `self`/`other` auto-deref to the pointees,
+        // so ordering is delegated without exposing the wrapped pointers.
+        P::Target::partial_cmp(self, other)
+    }
+
+    fn lt(&self, other: &Pin<Q>) -> bool {
+        P::Target::lt(self, other)
+    }
+
+    fn le(&self, other: &Pin<Q>) -> bool {
+        P::Target::le(self, other)
+    }
+
+    fn gt(&self, other: &Pin<Q>) -> bool {
+        P::Target::gt(self, other)
+    }
+
+    fn ge(&self, other: &Pin<Q>) -> bool {
+        P::Target::ge(self, other)
+    }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref<Target: Ord>> Ord for Pin<P> {
+    fn cmp(&self, other: &Self) -> cmp::Ordering {
+        // Delegate to the pointee via auto-deref; written manually (not derived)
+        // for the soundness reasons noted above the `PartialEq` impl.
+        P::Target::cmp(self, other)
+    }
+}
+
+#[stable(feature = "pin_trait_impls", since = "1.41.0")]
+impl<P: Deref<Target: Hash>> Hash for Pin<P> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        // Hash the pointee (via auto-deref), consistent with the `PartialEq`
+        // impl above: values that compare equal hash equal.
+        P::Target::hash(self, state);
+    }
+}
+
+impl<P: Deref<Target: Unpin>> Pin<P> {
+    /// Construct a new `Pin<P>` around a pointer to some data of a type that
+    /// implements [`Unpin`].
+    ///
+    /// Unlike `Pin::new_unchecked`, this method is safe because the pointer
+    /// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees.
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub const fn new(pointer: P) -> Pin<P> {
+        // SAFETY: the value pointed to is `Unpin`, and so has no requirements
+        // around pinning.
+        unsafe { Pin::new_unchecked(pointer) }
+    }
+
+    /// Unwraps this `Pin<P>` returning the underlying pointer.
+    ///
+    /// This requires that the data inside this `Pin` is [`Unpin`] so that we
+    /// can ignore the pinning invariants when unwrapping it.
+    ///
+    /// Note: this is an associated function, not a method — call it as
+    /// `Pin::into_inner(pin)`.
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin_into_inner", since = "1.39.0")]
+    pub const fn into_inner(pin: Pin<P>) -> P {
+        pin.pointer
+    }
+}
+
+impl<P: Deref> Pin<P> {
+    /// Construct a new `Pin<P>` around a reference to some data of a type that
+    /// may or may not implement `Unpin`.
+    ///
+    /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used
+    /// instead.
+    ///
+    /// # Safety
+    ///
+    /// This constructor is unsafe because we cannot guarantee that the data
+    /// pointed to by `pointer` is pinned, meaning that the data will not be moved or
+    /// its storage invalidated until it gets dropped. If the constructed `Pin<P>` does
+    /// not guarantee that the data `P` points to is pinned, that is a violation of
+    /// the API contract and may lead to undefined behavior in later (safe) operations.
+    ///
+    /// By using this method, you are making a promise about the `P::Deref` and
+    /// `P::DerefMut` implementations, if they exist. Most importantly, they
+    /// must not move out of their `self` arguments: `Pin::as_mut` and `Pin::as_ref`
+    /// will call `DerefMut::deref_mut` and `Deref::deref` *on the pinned pointer*
+    /// and expect these methods to uphold the pinning invariants.
+    /// Moreover, by calling this method you promise that the reference `P`
+    /// dereferences to will not be moved out of again; in particular, it
+    /// must not be possible to obtain a `&mut P::Target` and then
+    /// move out of that reference (using, for example [`mem::swap`]).
+    ///
+    /// For example, calling `Pin::new_unchecked` on an `&'a mut T` is unsafe because
+    /// while you are able to pin it for the given lifetime `'a`, you have no control
+    /// over whether it is kept pinned once `'a` ends:
+    /// ```
+    /// use std::mem;
+    /// use std::pin::Pin;
+    ///
+    /// fn move_pinned_ref<T>(mut a: T, mut b: T) {
+    ///     unsafe {
+    ///         let p: Pin<&mut T> = Pin::new_unchecked(&mut a);
+    ///         // This should mean the pointee `a` can never move again.
+    ///     }
+    ///     mem::swap(&mut a, &mut b);
+    ///     // The address of `a` changed to `b`'s stack slot, so `a` got moved even
+    ///     // though we have previously pinned it! We have violated the pinning API contract.
+    /// }
+    /// ```
+    /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`).
+    ///
+    /// Similarly, calling `Pin::new_unchecked` on an `Rc<T>` is unsafe because there could be
+    /// aliases to the same data that are not subject to the pinning restrictions:
+    /// ```
+    /// use std::rc::Rc;
+    /// use std::pin::Pin;
+    ///
+    /// fn move_pinned_rc<T>(mut x: Rc<T>) {
+    ///     let pinned = unsafe { Pin::new_unchecked(Rc::clone(&x)) };
+    ///     {
+    ///         let p: Pin<&T> = pinned.as_ref();
+    ///         // This should mean the pointee can never move again.
+    ///     }
+    ///     drop(pinned);
+    ///     let content = Rc::get_mut(&mut x).unwrap();
+    ///     // Now, if `x` was the only reference, we have a mutable reference to
+    ///     // data that we pinned above, which we could use to move it as we have
+    ///     // seen in the previous example. We have violated the pinning API contract.
+    /// }
+    /// ```
+    ///
+    /// [`mem::swap`]: crate::mem::swap
+    // Lang item: the compiler refers to this function by name, so it must not
+    // be renamed. NOTE(review): confirm the exact compiler use against rustc's
+    // lang-item registry.
+    #[lang = "new_unchecked"]
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub const unsafe fn new_unchecked(pointer: P) -> Pin<P> {
+        Pin { pointer }
+    }
+
+    /// Gets a pinned shared reference from this pinned pointer.
+    ///
+    /// This is a generic method to go from `&Pin<Pointer<T>>` to `Pin<&T>`.
+    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
+    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
+    /// "Malicious" implementations of `Pointer::Deref` are likewise
+    /// ruled out by the contract of `Pin::new_unchecked`.
+    #[stable(feature = "pin", since = "1.33.0")]
+    #[inline(always)]
+    pub fn as_ref(&self) -> Pin<&P::Target> {
+        // SAFETY: see documentation on this function
+        unsafe { Pin::new_unchecked(&*self.pointer) }
+    }
+
+    /// Unwraps this `Pin<P>` returning the underlying pointer.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe. You must guarantee that you will continue to
+    /// treat the pointer `P` as pinned after you call this function, so that
+    /// the invariants on the `Pin` type can be upheld. If the code using the
+    /// resulting `P` does not continue to maintain the pinning invariants that
+    /// is a violation of the API contract and may lead to undefined behavior in
+    /// later (safe) operations.
+    ///
+    /// If the underlying data is [`Unpin`], [`Pin::into_inner`] should be used
+    /// instead.
+    ///
+    /// Note: this is an associated function, not a method — call it as
+    /// `Pin::into_inner_unchecked(pin)`.
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin_into_inner", since = "1.39.0")]
+    pub const unsafe fn into_inner_unchecked(pin: Pin<P>) -> P {
+        pin.pointer
+    }
+}
+
+impl<P: DerefMut> Pin<P> {
+    /// Gets a pinned mutable reference from this pinned pointer.
+    ///
+    /// This is a generic method to go from `&mut Pin<Pointer<T>>` to `Pin<&mut T>`.
+    /// It is safe because, as part of the contract of `Pin::new_unchecked`,
+    /// the pointee cannot move after `Pin<Pointer<T>>` got created.
+    /// "Malicious" implementations of `Pointer::DerefMut` are likewise
+    /// ruled out by the contract of `Pin::new_unchecked`.
+    ///
+    /// This method is useful when doing multiple calls to functions that consume the pinned type.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::pin::Pin;
+    ///
+    /// # struct Type {}
+    /// impl Type {
+    ///     fn method(self: Pin<&mut Self>) {
+    ///         // do something
+    ///     }
+    ///
+    ///     fn call_method_twice(mut self: Pin<&mut Self>) {
+    ///         // `method` consumes `self`, so reborrow the `Pin<&mut Self>` via `as_mut`.
+    ///         self.as_mut().method();
+    ///         self.as_mut().method();
+    ///     }
+    /// }
+    /// ```
+    #[stable(feature = "pin", since = "1.33.0")]
+    #[inline(always)]
+    pub fn as_mut(&mut self) -> Pin<&mut P::Target> {
+        // SAFETY: see documentation on this function
+        unsafe { Pin::new_unchecked(&mut *self.pointer) }
+    }
+
+    /// Assigns a new value to the memory behind the pinned reference.
+    ///
+    /// This overwrites pinned data, but that is okay: its destructor gets
+    /// run before being overwritten, so no pinning guarantee is violated.
+    #[stable(feature = "pin", since = "1.33.0")]
+    #[inline(always)]
+    pub fn set(&mut self, value: P::Target)
+    where
+        P::Target: Sized,
+    {
+        // Plain assignment drops the old pointee in place before the new value
+        // is written, which is what upholds the `Drop` guarantee here.
+        *(self.pointer) = value;
+    }
+}
+
+impl<'a, T: ?Sized> Pin<&'a T> {
+    /// Constructs a new pin by mapping the interior value.
+    ///
+    /// For example, if you wanted to get a `Pin` of a field of something,
+    /// you could use this to get access to that field in one line of code.
+    /// However, there are several gotchas with these "pinning projections";
+    /// see the [`pin` module] documentation for further details on that topic.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe. You must guarantee that the data you return
+    /// will not move so long as the argument value does not move (for example,
+    /// because it is one of the fields of that value), and also that you do
+    /// not move out of the argument you receive to the interior function.
+    ///
+    /// [`pin` module]: self#projections-and-structural-pinning
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub unsafe fn map_unchecked<U, F>(self, func: F) -> Pin<&'a U>
+    where
+        U: ?Sized,
+        F: FnOnce(&T) -> &U,
+    {
+        let pointer = &*self.pointer;
+        let new_pointer = func(pointer);
+
+        // SAFETY: the safety contract for `new_unchecked` must be
+        // upheld by the caller.
+        unsafe { Pin::new_unchecked(new_pointer) }
+    }
+
+    /// Gets a shared reference out of a pin.
+    ///
+    /// This is safe because it is not possible to move out of a shared reference.
+    /// It may seem like there is an issue here with interior mutability: in fact,
+    /// it *is* possible to move a `T` out of a `&RefCell<T>`. However, this is
+    /// not a problem as long as there does not also exist a `Pin<&T>` pointing
+    /// to the same data, and `RefCell<T>` does not let you create a pinned reference
+    /// to its contents. See the discussion on ["pinning projections"] for further
+    /// details.
+    ///
+    /// Note: `Pin` also implements `Deref` to the target, which can be used
+    /// to access the inner value. However, `Deref` only provides a reference
+    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
+    /// the `Pin` itself. This method allows turning the `Pin` into a reference
+    /// with the same lifetime as the original `Pin`.
+    ///
+    /// ["pinning projections"]: self#projections-and-structural-pinning
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub const fn get_ref(self) -> &'a T {
+        // A shared reference cannot be used to move the pointee, so handing
+        // out the full-lifetime `&'a T` is safe.
+        self.pointer
+    }
+}
+
+impl<'a, T: ?Sized> Pin<&'a mut T> {
+    /// Converts this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime.
+    #[inline(always)]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub const fn into_ref(self) -> Pin<&'a T> {
+        // Re-wrap the same pointer; downgrading `&mut` to `&` cannot be used
+        // to move the pointee, so no `unsafe` is needed here.
+        Pin { pointer: self.pointer }
+    }
+
+    /// Gets a mutable reference to the data inside of this `Pin`.
+    ///
+    /// This requires that the data inside this `Pin` is `Unpin`.
+    ///
+    /// Note: `Pin` also implements `DerefMut` to the data, which can be used
+    /// to access the inner value. However, `DerefMut` only provides a reference
+    /// that lives for as long as the borrow of the `Pin`, not the lifetime of
+    /// the `Pin` itself. This method allows turning the `Pin` into a reference
+    /// with the same lifetime as the original `Pin`.
+    #[inline(always)]
+    #[stable(feature = "pin", since = "1.33.0")]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    pub const fn get_mut(self) -> &'a mut T
+    where
+        T: Unpin,
+    {
+        // `T: Unpin` means the pointee does not care about being moved, so
+        // exposing `&mut T` is fine.
+        self.pointer
+    }
+
+    /// Gets a mutable reference to the data inside of this `Pin`.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe. You must guarantee that you will never move
+    /// the data out of the mutable reference you receive when you call this
+    /// function, so that the invariants on the `Pin` type can be upheld.
+    ///
+    /// If the underlying data is `Unpin`, `Pin::get_mut` should be used
+    /// instead.
+    #[inline(always)]
+    #[stable(feature = "pin", since = "1.33.0")]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    pub const unsafe fn get_unchecked_mut(self) -> &'a mut T {
+        self.pointer
+    }
+
+    /// Construct a new pin by mapping the interior value.
+    ///
+    /// For example, if you wanted to get a `Pin` of a field of something,
+    /// you could use this to get access to that field in one line of code.
+    /// However, there are several gotchas with these "pinning projections";
+    /// see the [`pin` module] documentation for further details on that topic.
+    ///
+    /// # Safety
+    ///
+    /// This function is unsafe. You must guarantee that the data you return
+    /// will not move so long as the argument value does not move (for example,
+    /// because it is one of the fields of that value), and also that you do
+    /// not move out of the argument you receive to the interior function.
+    ///
+    /// [`pin` module]: self#projections-and-structural-pinning
+    #[stable(feature = "pin", since = "1.33.0")]
+    pub unsafe fn map_unchecked_mut<U, F>(self, func: F) -> Pin<&'a mut U>
+    where
+        U: ?Sized,
+        F: FnOnce(&mut T) -> &mut U,
+    {
+        // SAFETY: the caller is responsible for not moving the
+        // value out of this reference.
+        let pointer = unsafe { Pin::get_unchecked_mut(self) };
+        let new_pointer = func(pointer);
+        // SAFETY: as the value behind `pointer` is guaranteed to not have
+        // been moved out, this call to `new_unchecked` is safe.
+        unsafe { Pin::new_unchecked(new_pointer) }
+    }
+}
+
+impl<T: ?Sized> Pin<&'static T> {
+    /// Get a pinned reference from a static reference.
+    ///
+    /// This is safe, because `T` is borrowed for the `'static` lifetime, which
+    /// never ends.
+    #[unstable(feature = "pin_static_ref", issue = "78186")]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    pub const fn static_ref(r: &'static T) -> Pin<&'static T> {
+        // SAFETY: The 'static borrow guarantees the data will not be
+        // moved/invalidated until it gets dropped (which is never).
+        unsafe { Pin::new_unchecked(r) }
+    }
+}
+
+impl<T: ?Sized> Pin<&'static mut T> {
+    /// Get a pinned mutable reference from a static mutable reference.
+    ///
+    /// This is safe, because `T` is borrowed for the `'static` lifetime, which
+    /// never ends.
+    #[unstable(feature = "pin_static_ref", issue = "78186")]
+    #[rustc_const_unstable(feature = "const_pin", issue = "76654")]
+    pub const fn static_mut(r: &'static mut T) -> Pin<&'static mut T> {
+        // SAFETY: same reasoning as `static_ref` — the 'static borrow
+        // guarantees the data will not be moved/invalidated until it gets
+        // dropped (which is never).
+        unsafe { Pin::new_unchecked(r) }
+    }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: Deref> Deref for Pin<P> {
+    type Target = P::Target;
+    fn deref(&self) -> &P::Target {
+        // Shared access to the pointee is always safe — it cannot be used to
+        // move the value. This impl also powers the auto-deref used by the
+        // comparison/hash impls above.
+        Pin::get_ref(Pin::as_ref(self))
+    }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: DerefMut<Target: Unpin>> DerefMut for Pin<P> {
+    fn deref_mut(&mut self) -> &mut P::Target {
+        // Only available when the target is `Unpin`: handing out `&mut` to a
+        // pinned `!Unpin` value would let safe code move it (e.g. `mem::swap`).
+        Pin::get_mut(Pin::as_mut(self))
+    }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+// Allows `self: Pin<P>` to be used as a method receiver.
+impl<P: Receiver> Receiver for Pin<P> {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Debug> fmt::Debug for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Delegates to the pointer type's own `Debug` impl.
+        fmt::Debug::fmt(&self.pointer, f)
+    }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Display> fmt::Display for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Delegates to the pointer type's own `Display` impl.
+        fmt::Display::fmt(&self.pointer, f)
+    }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P: fmt::Pointer> fmt::Pointer for Pin<P> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Formats the address held by the wrapped pointer.
+        fmt::Pointer::fmt(&self.pointer, f)
+    }
+}
+
+// Note: this means that any impl of `CoerceUnsized` that allows coercing from
+// a type that impls `Deref<Target=impl !Unpin>` to a type that impls
+// `Deref<Target=Unpin>` is unsound. Any such impl would probably be unsound
+// for other reasons, though, so we just need to take care not to allow such
+// impls to land in std.
+//
+// This enables unsizing coercions such as `Pin<Box<T>>` -> `Pin<Box<dyn Trait>>`.
+#[stable(feature = "pin", since = "1.33.0")]
+impl<P, U> CoerceUnsized<Pin<U>> for Pin<P> where P: CoerceUnsized<U> {}
+
+#[stable(feature = "pin", since = "1.33.0")]
+// Lets `self: Pin<P>` receivers participate in dynamic dispatch on trait objects.
+impl<P, U> DispatchFromDyn<Pin<U>> for Pin<P> where P: DispatchFromDyn<U> {}
--- /dev/null
+//! The libcore prelude
+
+#![stable(feature = "core_prelude", since = "1.4.0")]
+
+pub mod v1;
--- /dev/null
+//! The core prelude
+//!
+//! This module is intended for users of libcore which do not link to libstd as
+//! well. This module is imported by default when `#![no_std]` is used in the
+//! same manner as the standard library's prelude.
+
+#![stable(feature = "core_prelude", since = "1.4.0")]
+
+// Re-exported core operators
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::marker::{Copy, Send, Sized, Sync, Unpin};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::ops::{Drop, Fn, FnMut, FnOnce};
+
+// Re-exported functions
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::mem::drop;
+
+// Re-exported types and traits
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::clone::Clone;
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::cmp::{Eq, Ord, PartialEq, PartialOrd};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::convert::{AsMut, AsRef, From, Into};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::default::Default;
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::iter::{DoubleEndedIterator, ExactSizeIterator};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::iter::{Extend, IntoIterator, Iterator};
+// `Option`/`Result` re-export the variants too, so `Some(x)` / `Ok(x)` work
+// unqualified everywhere.
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::option::Option::{self, None, Some};
+#[stable(feature = "core_prelude", since = "1.4.0")]
+#[doc(no_inline)]
+pub use crate::result::Result::{self, Err, Ok};
+
+// Re-exported built-in macros.
+// `Debug` and `Hash` here are the *derive macros*, not the traits of the same
+// name (which live at `crate::fmt::Debug` / `crate::hash::Hash`).
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(no_inline)]
+pub use crate::fmt::macros::Debug;
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[doc(no_inline)]
+pub use crate::hash::macros::Hash;
+
+// Function-like built-in macros. `#[allow(deprecated)]` keeps this list
+// building warning-free even when items in it become deprecated.
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+#[doc(no_inline)]
+pub use crate::{
+    asm, assert, cfg, column, compile_error, concat, concat_idents, env, file, format_args,
+    format_args_nl, global_asm, include, include_bytes, include_str, line, llvm_asm, log_syntax,
+    module_path, option_env, stringify, trace_macros,
+};
+
+// Attribute and derive macros implemented directly in the compiler.
+#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
+#[allow(deprecated)]
+#[doc(no_inline)]
+pub use crate::macros::builtin::{
+    bench, global_allocator, test, test_case, RustcDecodable, RustcEncodable,
+};
+
+// Not yet stable: usable only behind the `cfg_accessible` feature gate.
+#[unstable(
+    feature = "cfg_accessible",
+    issue = "64797",
+    reason = "`cfg_accessible` is not fully implemented"
+)]
+#[doc(no_inline)]
+pub use crate::macros::builtin::cfg_accessible;
--- /dev/null
+//! This module re-exports the primitive types so that they can always be
+//! named, even when the plain primitive name is shadowed by another declared
+//! type.
+//!
+//! This is normally only useful in macro generated code.
+//!
+//! An example of this is when generating a new struct and an impl for it:
+//!
+//! ```rust,compile_fail
+//! pub struct bool;
+//!
+//! impl QueryId for bool {
+//!     const SOME_PROPERTY: bool = true;
+//! }
+//!
+//! # trait QueryId { const SOME_PROPERTY: core::primitive::bool; }
+//! ```
+//!
+//! Note that the `SOME_PROPERTY` associated constant would not compile, as its
+//! type `bool` refers to the struct, rather than to the primitive bool type.
+//!
+//! A correct implementation could look like:
+//!
+//! ```rust
+//! # #[allow(non_camel_case_types)]
+//! pub struct bool;
+//!
+//! impl QueryId for bool {
+//!     const SOME_PROPERTY: core::primitive::bool = true;
+//! }
+//!
+//! # trait QueryId { const SOME_PROPERTY: core::primitive::bool; }
+//! ```
+
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use bool;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use char;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use f32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use f64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i128;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i16;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use i8;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use isize;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use str;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u128;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u16;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u32;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u64;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use u8;
+#[stable(feature = "core_primitive", since = "1.43.0")]
+pub use usize;
--- /dev/null
+use super::*;
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics;
+use crate::mem;
+use crate::slice::{self, SliceIndex};
+
+#[lang = "const_ptr"]
+impl<T: ?Sized> *const T {
+    /// Returns `true` if the pointer is null.
+    ///
+    /// Note that unsized types have many possible null pointers, as only the
+    /// raw data pointer is considered, not their length, vtable, etc.
+    /// Therefore, two pointers that are null may still not compare equal to
+    /// each other.
+    ///
+    /// ## Behavior during const evaluation
+    ///
+    /// When this function is used during const evaluation, it may return `false` for pointers
+    /// that turn out to be null at runtime. Specifically, when a pointer to some memory
+    /// is offset beyond its bounds in such a way that the resulting pointer is null,
+    /// the function will still return `false`. There is no way for CTFE to know
+    /// the absolute position of that memory, so we cannot tell if the pointer is
+    /// null or not.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "Follow the rabbit";
+    /// let ptr: *const u8 = s.as_ptr();
+    /// assert!(!ptr.is_null());
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        // Compare via a cast to a thin pointer, so fat pointers are only
+        // considering their "data" part for null-ness.
+        // `guaranteed_eq` (rather than `==`) keeps this usable in const
+        // contexts; its conservative `false` answers during CTFE are exactly
+        // the caveat documented above.
+        (self as *const u8).guaranteed_eq(null())
+    }
+
+    /// Casts to a pointer of another type.
+    ///
+    /// This only changes the pointee type; the address itself is unchanged.
+    #[stable(feature = "ptr_cast", since = "1.38.0")]
+    #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+    #[inline]
+    pub const fn cast<U>(self) -> *const U {
+        self as _
+    }
+
+    /// Returns `None` if the pointer is null, or else returns a shared reference to
+    /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
+    /// must be used instead.
+    ///
+    /// [`as_uninit_ref`]: #method.as_uninit_ref
+    ///
+    /// # Safety
+    ///
+    /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+    /// all of the following is true:
+    ///
+    /// * The pointer must be properly aligned.
+    ///
+    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+    ///
+    /// * The pointer must point to an initialized instance of `T`.
+    ///
+    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+    ///   In particular, for the duration of this lifetime, the memory the pointer points to must
+    ///   not get mutated (except inside `UnsafeCell`).
+    ///
+    /// This applies even if the result of this method is unused!
+    /// (The part about being initialized is not yet fully decided, but until
+    /// it is, the only safe approach is to ensure that they are indeed initialized.)
+    ///
+    /// [the module documentation]: crate::ptr#safety
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let ptr: *const u8 = &10u8 as *const u8;
+    ///
+    /// unsafe {
+    ///     if let Some(val_back) = ptr.as_ref() {
+    ///         println!("We got back the value: {}!", val_back);
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// # Null-unchecked version
+    ///
+    /// If you are sure the pointer can never be null and are looking for some kind of
+    /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+    /// dereference the pointer directly.
+    ///
+    /// ```
+    /// let ptr: *const u8 = &10u8 as *const u8;
+    ///
+    /// unsafe {
+    ///     let val_back = &*ptr;
+    ///     println!("We got back the value: {}!", val_back);
+    /// }
+    /// ```
+    #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+    #[inline]
+    pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
+        // SAFETY: the caller must guarantee that `self` is valid
+        // for a reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&*self) } }
+    }
+
+    /// Returns `None` if the pointer is null, or else returns a shared reference to
+    /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+    /// that the value has to be initialized.
+    ///
+    /// [`as_ref`]: #method.as_ref
+    ///
+    /// # Safety
+    ///
+    /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+    /// all of the following is true:
+    ///
+    /// * The pointer must be properly aligned.
+    ///
+    /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+    ///
+    /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+    ///   arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+    ///   In particular, for the duration of this lifetime, the memory the pointer points to must
+    ///   not get mutated (except inside `UnsafeCell`).
+    ///
+    /// This applies even if the result of this method is unused!
+    ///
+    /// [the module documentation]: crate::ptr#safety
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(ptr_as_uninit)]
+    ///
+    /// let ptr: *const u8 = &10u8 as *const u8;
+    ///
+    /// unsafe {
+    ///     if let Some(val_back) = ptr.as_uninit_ref() {
+    ///         println!("We got back the value: {}!", val_back.assume_init());
+    ///     }
+    /// }
+    /// ```
+    #[inline]
+    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+    pub unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a reference.
+        // The cast is sound because `MaybeUninit<T>` has the same layout as `T`.
+        if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
+    }
+
+    /// Calculates the offset from a pointer.
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is Undefined
+    /// Behavior:
+    ///
+    /// * Both the starting and resulting pointer must be either in bounds or one
+    ///   byte past the end of the same allocated object. Note that in Rust,
+    ///   every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+    ///
+    /// * The offset being in bounds cannot rely on "wrapping around" the address
+    ///   space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+    ///
+    /// The compiler and standard library generally tries to ensure allocations
+    /// never reach a size where an offset is a concern. For instance, `Vec`
+    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+    /// `vec.as_ptr().add(vec.len())` is always safe.
+    ///
+    /// Most platforms fundamentally can't even construct such an allocation.
+    /// For instance, no known 64-bit platform can ever serve a request
+    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+    /// more than `isize::MAX` bytes with things like Physical Address
+    /// Extension. As such, memory acquired directly from allocators or memory
+    /// mapped files *may* be too large to handle with this function.
+    ///
+    /// Consider using [`wrapping_offset`] instead if these constraints are
+    /// difficult to satisfy. The only advantage of this method is that it
+    /// enables more aggressive compiler optimizations.
+    ///
+    /// [`wrapping_offset`]: #method.wrapping_offset
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "123";
+    /// let ptr: *const u8 = s.as_ptr();
+    ///
+    /// unsafe {
+    ///     println!("{}", *ptr.offset(1) as char);
+    ///     println!("{}", *ptr.offset(2) as char);
+    /// }
+    /// ```
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const unsafe fn offset(self, count: isize) -> *const T
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { intrinsics::offset(self, count) }
+    }
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// The resulting pointer does not need to be in bounds, but it is
+    /// potentially hazardous to dereference (which requires `unsafe`).
+    ///
+    /// In particular, the resulting pointer remains attached to the same allocated
+    /// object that `self` points to. It may *not* be used to access a
+    /// different allocated object. Note that in Rust,
+    /// every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// In other words, `x.wrapping_offset((y as usize).wrapping_sub(x as usize) / size_of::<T>())`
+    /// is *not* the same as `y`, and dereferencing it is undefined behavior
+    /// unless `x` and `y` point into the same allocated object.
+    ///
+    /// Compared to [`offset`], this method basically delays the requirement of staying
+    /// within the same allocated object: [`offset`] is immediate Undefined Behavior when
+    /// crossing object boundaries; `wrapping_offset` produces a pointer but still leads
+    /// to Undefined Behavior if that pointer is dereferenced. [`offset`] can be optimized
+    /// better and is thus preferable in performance-sensitive code.
+    ///
+    /// If you need to cross object boundaries, cast the pointer to an integer and
+    /// do the arithmetic there.
+    ///
+    /// [`offset`]: #method.offset
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // Iterate using a raw pointer in increments of two elements
+    /// let data = [1u8, 2, 3, 4, 5];
+    /// let mut ptr: *const u8 = data.as_ptr();
+    /// let step = 2;
+    /// let end_rounded_up = ptr.wrapping_offset(6);
+    ///
+    /// // This loop prints "1, 3, 5, "
+    /// while ptr != end_rounded_up {
+    ///     unsafe {
+    ///         print!("{}, ", *ptr);
+    ///     }
+    ///     ptr = ptr.wrapping_offset(step);
+    /// }
+    /// ```
+    #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const fn wrapping_offset(self, count: isize) -> *const T
+    where
+        T: Sized,
+    {
+        // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
+        // Unlike `offset`, it wraps on overflow instead of being UB, which is
+        // why this method can be safe.
+        unsafe { intrinsics::arith_offset(self, count) }
+    }
+
+    /// Calculates the distance between two pointers. The returned value is in
+    /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+    ///
+    /// This function is the inverse of [`offset`].
+    ///
+    /// [`offset`]: #method.offset
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is Undefined
+    /// Behavior:
+    ///
+    /// * Both the starting and other pointer must be either in bounds or one
+    ///   byte past the end of the same allocated object. Note that in Rust,
+    ///   every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// * Both pointers must be *derived from* a pointer to the same object.
+    ///   (See below for an example.)
+    ///
+    /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+    ///
+    /// * The distance between the pointers, in bytes, must be an exact multiple
+    ///   of the size of `T`.
+    ///
+    /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+    ///
+    /// The compiler and standard library generally try to ensure allocations
+    /// never reach a size where an offset is a concern. For instance, `Vec`
+    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+    /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
+    ///
+    /// Most platforms fundamentally can't even construct such an allocation.
+    /// For instance, no known 64-bit platform can ever serve a request
+    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+    /// more than `isize::MAX` bytes with things like Physical Address
+    /// Extension. As such, memory acquired directly from allocators or memory
+    /// mapped files *may* be too large to handle with this function.
+    ///
+    /// # Panics
+    ///
+    /// This function panics if `T` is a Zero-Sized Type ("ZST").
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let a = [0; 5];
+    /// let ptr1: *const i32 = &a[1];
+    /// let ptr2: *const i32 = &a[3];
+    /// unsafe {
+    ///     assert_eq!(ptr2.offset_from(ptr1), 2);
+    ///     assert_eq!(ptr1.offset_from(ptr2), -2);
+    ///     assert_eq!(ptr1.offset(2), ptr2);
+    ///     assert_eq!(ptr2.offset(-2), ptr1);
+    /// }
+    /// ```
+    ///
+    /// *Incorrect* usage:
+    ///
+    /// ```rust,no_run
+    /// let ptr1 = Box::into_raw(Box::new(0u8)) as *const u8;
+    /// let ptr2 = Box::into_raw(Box::new(1u8)) as *const u8;
+    /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
+    /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+    /// let ptr2_other = (ptr1 as *const u8).wrapping_offset(diff);
+    /// assert_eq!(ptr2 as usize, ptr2_other as usize);
+    /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+    /// // computing their offset is undefined behavior, even though
+    /// // they point to the same address!
+    /// unsafe {
+    ///     let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+    /// }
+    /// ```
+    #[stable(feature = "ptr_offset_from", since = "1.47.0")]
+    #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "41079")]
+    #[inline]
+    pub const unsafe fn offset_from(self, origin: *const T) -> isize
+    where
+        T: Sized,
+    {
+        let pointee_size = mem::size_of::<T>();
+        // Enforces the documented panic for ZSTs (a distance in units of a
+        // zero-sized `T` is meaningless) and rules out sizes > `isize::MAX`.
+        assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
+        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
+        unsafe { intrinsics::ptr_offset_from(self, origin) }
+    }
+
+    /// Returns whether two pointers are guaranteed to be equal.
+    ///
+    /// At runtime this function behaves like `self == other`.
+    /// However, in some contexts (e.g., compile-time evaluation),
+    /// it is not always possible to determine equality of two pointers, so this function may
+    /// spuriously return `false` for pointers that later actually turn out to be equal.
+    /// But when it returns `true`, the pointers are guaranteed to be equal.
+    ///
+    /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
+    /// comparisons for which both functions return `false`.
+    ///
+    /// [`guaranteed_ne`]: #method.guaranteed_ne
+    ///
+    /// The return value may change depending on the compiler version and unsafe code may not
+    /// rely on the result of this function for soundness. It is suggested to only use this function
+    /// for performance optimizations where spurious `false` return values by this function do not
+    /// affect the outcome, but just the performance.
+    /// The consequences of using this method to make runtime and compile-time code behave
+    /// differently have not been explored. This method should not be used to introduce such
+    /// differences, and it should also not be stabilized before we have a better understanding
+    /// of this issue.
+    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+    #[inline]
+    pub const fn guaranteed_eq(self, other: *const T) -> bool
+    where
+        T: Sized,
+    {
+        // The intrinsic answers conservatively: `false` whenever equality
+        // cannot be decided (e.g. during CTFE), as documented above.
+        intrinsics::ptr_guaranteed_eq(self, other)
+    }
+
+    /// Returns whether two pointers are guaranteed to be unequal.
+    ///
+    /// At runtime this function behaves like `self != other`.
+    /// However, in some contexts (e.g., compile-time evaluation),
+    /// it is not always possible to determine the inequality of two pointers, so this function may
+    /// spuriously return `false` for pointers that later actually turn out to be unequal.
+    /// But when it returns `true`, the pointers are guaranteed to be unequal.
+    ///
+    /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
+    /// comparisons for which both functions return `false`.
+    ///
+    /// [`guaranteed_eq`]: #method.guaranteed_eq
+    ///
+    /// The return value may change depending on the compiler version and unsafe code may not
+    /// rely on the result of this function for soundness. It is suggested to only use this function
+    /// for performance optimizations where spurious `false` return values by this function do not
+    /// affect the outcome, but just the performance.
+    /// The consequences of using this method to make runtime and compile-time code behave
+    /// differently have not been explored. This method should not be used to introduce such
+    /// differences, and it should also not be stabilized before we have a better understanding
+    /// of this issue.
+    #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+    #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+    #[inline]
+    pub const fn guaranteed_ne(self, other: *const T) -> bool
+    where
+        T: Sized,
+    {
+        // Conservative like `guaranteed_eq`: `false` when inequality cannot
+        // be decided at compile time.
+        intrinsics::ptr_guaranteed_ne(self, other)
+    }
+
+    /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is Undefined
+    /// Behavior:
+    ///
+    /// * Both the starting and resulting pointer must be either in bounds or one
+    ///   byte past the end of the same allocated object. Note that in Rust,
+    ///   every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+    ///
+    /// * The offset being in bounds cannot rely on "wrapping around" the address
+    ///   space. That is, the infinite-precision sum must fit in a `usize`.
+    ///
+    /// The compiler and standard library generally tries to ensure allocations
+    /// never reach a size where an offset is a concern. For instance, `Vec`
+    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+    /// `vec.as_ptr().add(vec.len())` is always safe.
+    ///
+    /// Most platforms fundamentally can't even construct such an allocation.
+    /// For instance, no known 64-bit platform can ever serve a request
+    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+    /// more than `isize::MAX` bytes with things like Physical Address
+    /// Extension. As such, memory acquired directly from allocators or memory
+    /// mapped files *may* be too large to handle with this function.
+    ///
+    /// Consider using [`wrapping_add`] instead if these constraints are
+    /// difficult to satisfy. The only advantage of this method is that it
+    /// enables more aggressive compiler optimizations.
+    ///
+    /// [`wrapping_add`]: #method.wrapping_add
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "123";
+    /// let ptr: *const u8 = s.as_ptr();
+    ///
+    /// unsafe {
+    ///     println!("{}", *ptr.add(1) as char);
+    ///     println!("{}", *ptr.add(2) as char);
+    /// }
+    /// ```
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const unsafe fn add(self, count: usize) -> Self
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        // A `count` large enough to make the `as isize` cast wrap would
+        // already violate that contract (offset > `isize::MAX` bytes).
+        unsafe { self.offset(count as isize) }
+    }
+
+    /// Calculates the offset from a pointer (convenience for
+    /// `.offset((count as isize).wrapping_neg())`).
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is Undefined
+    /// Behavior:
+    ///
+    /// * Both the starting and resulting pointer must be either in bounds or one
+    ///   byte past the end of the same allocated object. Note that in Rust,
+    ///   every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+    ///
+    /// * The offset being in bounds cannot rely on "wrapping around" the address
+    ///   space. That is, the infinite-precision sum must fit in a `usize`.
+    ///
+    /// The compiler and standard library generally tries to ensure allocations
+    /// never reach a size where an offset is a concern. For instance, `Vec`
+    /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+    /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+    ///
+    /// Most platforms fundamentally can't even construct such an allocation.
+    /// For instance, no known 64-bit platform can ever serve a request
+    /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+    /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+    /// more than `isize::MAX` bytes with things like Physical Address
+    /// Extension. As such, memory acquired directly from allocators or memory
+    /// mapped files *may* be too large to handle with this function.
+    ///
+    /// Consider using [`wrapping_sub`] instead if these constraints are
+    /// difficult to satisfy. The only advantage of this method is that it
+    /// enables more aggressive compiler optimizations.
+    ///
+    /// [`wrapping_sub`]: #method.wrapping_sub
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// let s: &str = "123";
+    ///
+    /// unsafe {
+    ///     let end: *const u8 = s.as_ptr().add(3);
+    ///     println!("{}", *end.sub(1) as char);
+    ///     println!("{}", *end.sub(2) as char);
+    /// }
+    /// ```
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const unsafe fn sub(self, count: usize) -> Self
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        // `wrapping_neg` avoids an overflow panic when the cast yields
+        // `isize::MIN`; such a `count` violates the contract anyway.
+        unsafe { self.offset((count as isize).wrapping_neg()) }
+    }
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    /// (convenience for `.wrapping_offset(count as isize)`)
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// The resulting pointer does not need to be in bounds, but it is
+    /// potentially hazardous to dereference (which requires `unsafe`).
+    ///
+    /// In particular, the resulting pointer remains attached to the same allocated
+    /// object that `self` points to. It may *not* be used to access a
+    /// different allocated object. Note that in Rust,
+    /// every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// Compared to [`add`], this method basically delays the requirement of staying
+    /// within the same allocated object: [`add`] is immediate Undefined Behavior when
+    /// crossing object boundaries; `wrapping_add` produces a pointer but still leads
+    /// to Undefined Behavior if that pointer is dereferenced. [`add`] can be optimized
+    /// better and is thus preferable in performance-sensitive code.
+    ///
+    /// If you need to cross object boundaries, cast the pointer to an integer and
+    /// do the arithmetic there.
+    ///
+    /// [`add`]: #method.add
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // Iterate using a raw pointer in increments of two elements
+    /// let data = [1u8, 2, 3, 4, 5];
+    /// let mut ptr: *const u8 = data.as_ptr();
+    /// let step = 2;
+    /// let end_rounded_up = ptr.wrapping_add(6);
+    ///
+    /// // This loop prints "1, 3, 5, "
+    /// while ptr != end_rounded_up {
+    ///     unsafe {
+    ///         print!("{}, ", *ptr);
+    ///     }
+    ///     ptr = ptr.wrapping_add(step);
+    /// }
+    /// ```
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const fn wrapping_add(self, count: usize) -> Self
+    where
+        T: Sized,
+    {
+        // Wrapping semantics come from `wrapping_offset`; the plain cast is
+        // fine here since wrap-around is explicitly permitted.
+        self.wrapping_offset(count as isize)
+    }
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
+    ///
+    /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+    ///
+    /// # Safety
+    ///
+    /// The resulting pointer does not need to be in bounds, but it is
+    /// potentially hazardous to dereference (which requires `unsafe`).
+    ///
+    /// In particular, the resulting pointer remains attached to the same allocated
+    /// object that `self` points to. It may *not* be used to access a
+    /// different allocated object. Note that in Rust,
+    /// every (stack-allocated) variable is considered a separate allocated object.
+    ///
+    /// Compared to [`sub`], this method basically delays the requirement of staying
+    /// within the same allocated object: [`sub`] is immediate Undefined Behavior when
+    /// crossing object boundaries; `wrapping_sub` produces a pointer but still leads
+    /// to Undefined Behavior if that pointer is dereferenced. [`sub`] can be optimized
+    /// better and is thus preferable in performance-sensitive code.
+    ///
+    /// If you need to cross object boundaries, cast the pointer to an integer and
+    /// do the arithmetic there.
+    ///
+    /// [`sub`]: #method.sub
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // Iterate using a raw pointer in increments of two elements (backwards)
+    /// let data = [1u8, 2, 3, 4, 5];
+    /// let mut ptr: *const u8 = data.as_ptr();
+    /// let start_rounded_down = ptr.wrapping_sub(2);
+    /// ptr = ptr.wrapping_add(4);
+    /// let step = 2;
+    /// // This loop prints "5, 3, 1, "
+    /// while ptr != start_rounded_down {
+    ///     unsafe {
+    ///         print!("{}, ", *ptr);
+    ///     }
+    ///     ptr = ptr.wrapping_sub(step);
+    /// }
+    /// ```
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+    #[inline]
+    pub const fn wrapping_sub(self, count: usize) -> Self
+    where
+        T: Sized,
+    {
+        // `wrapping_neg` handles `isize::MIN` without overflow; wrap-around
+        // is explicitly permitted by `wrapping_offset`'s contract.
+        self.wrapping_offset((count as isize).wrapping_neg())
+    }
+
+    /// Sets the pointer value to `ptr`.
+    ///
+    /// In case `self` is a (fat) pointer to an unsized type, this operation
+    /// will only affect the pointer part, whereas for (thin) pointers to
+    /// sized types, this has the same effect as a simple assignment.
+    ///
+    /// The resulting pointer will have provenance of `val`, i.e., for a fat
+    /// pointer, this operation is semantically the same as creating a new
+    /// fat pointer with the data pointer value of `val` but the metadata of
+    /// `self`.
+    ///
+    /// # Examples
+    ///
+    /// This function is primarily useful for allowing byte-wise pointer
+    /// arithmetic on potentially fat pointers:
+    ///
+    /// ```
+    /// #![feature(set_ptr_value)]
+    /// # use core::fmt::Debug;
+    /// let arr: [i32; 3] = [1, 2, 3];
+    /// let mut ptr = &arr[0] as *const dyn Debug;
+    /// let thin = ptr as *const u8;
+    /// unsafe {
+    ///     ptr = ptr.set_ptr_value(thin.add(8));
+    ///     # assert_eq!(*(ptr as *const i32), 3);
+    ///     println!("{:?}", &*ptr); // will print "3"
+    /// }
+    /// ```
+    #[unstable(feature = "set_ptr_value", issue = "75091")]
+    #[must_use = "returns a new pointer rather than modifying its argument"]
+    #[inline]
+    pub fn set_ptr_value(mut self, val: *const u8) -> Self {
+        // Reinterpret the (possibly fat) pointer's storage as a thin pointer
+        // so only its first word — the data pointer — is overwritten.
+        let thin = &mut self as *mut *const T as *mut *const u8;
+        // SAFETY: In case of a thin pointer, this operation is identical
+        // to a simple assignment. In case of a fat pointer, with the current
+        // fat pointer layout implementation, the first field of such a
+        // pointer is always the data pointer, which is likewise assigned.
+        unsafe { *thin = val };
+        self
+    }
+
+    /// Reads the value from `self` without moving it. This leaves the
+    /// memory in `self` unchanged.
+    ///
+    /// See [`ptr::read`] for safety concerns and examples.
+    ///
+    /// [`ptr::read`]: ./ptr/fn.read.html
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[inline]
+    pub unsafe fn read(self) -> T
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must uphold the safety contract for `read`.
+        // Delegates to the free function `ptr::read` (in scope via `use super::*`).
+        unsafe { read(self) }
+    }
+
+    /// Performs a volatile read of the value from `self` without moving it. This
+    /// leaves the memory in `self` unchanged.
+    ///
+    /// Volatile operations are intended to act on I/O memory, and are guaranteed
+    /// to not be elided or reordered by the compiler across other volatile
+    /// operations.
+    ///
+    /// See [`ptr::read_volatile`] for safety concerns and examples.
+    ///
+    /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
+    #[stable(feature = "pointer_methods", since = "1.26.0")]
+    #[inline]
+    pub unsafe fn read_volatile(self) -> T
+    where
+        T: Sized,
+    {
+        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+        // Delegates to the free function `ptr::read_volatile` (via `use super::*`).
+        unsafe { read_volatile(self) }
+    }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // Thin convenience wrapper: forwards `self` to the free function
+ // `read_unaligned`.
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+ unsafe { read_unaligned(self) }
+ }
+
+ /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: ./ptr/fn.copy.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_to(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // `self` is the source and `dest` the destination, i.e. the same
+ // (src, dst, count) order as the free function `copy`.
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(self, dest, count) }
+ }
+
+ /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // `self` is the source and `dest` the destination, i.e. the same
+ // (src, dst, count) order as the free function `copy_nonoverlapping`.
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(self, dest, count) }
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
+ /// used with the `wrapping_add` method.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// # fn foo(n: usize) {
+ /// # use std::mem::align_of;
+ /// # unsafe {
+ /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
+ /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ /// if offset < x.len() - n - 1 {
+ /// let u16_ptr = ptr.add(offset) as *const u16;
+ /// assert_ne!(*u16_ptr, 500);
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # } }
+ /// ```
+ #[stable(feature = "align_offset", since = "1.36.0")]
+ pub fn align_offset(self, align: usize) -> usize
+ where
+ T: Sized,
+ {
+ // A non-power-of-two alignment is always a caller bug (documented
+ // panic above), so fail loudly rather than return a bogus offset.
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) }
+ }
+}
+
+#[lang = "const_slice_ptr"]
+impl<T> *const [T] {
+ /// Returns the length of a raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the raw slice cannot be cast to a slice
+ /// reference because the pointer is null or unaligned.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_len)]
+ ///
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn len(self) -> usize {
+ // SAFETY: this is safe because `*const [T]` and `FatPtr<T>` have the same layout.
+ // Only `std` can make this guarantee.
+ unsafe { Repr { rust: self }.raw }.len
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// This is equivalent to casting `self` to `*const T`, but more type-safe.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get)]
+ /// use std::ptr;
+ ///
+ /// let slice: *const [i8] = ptr::slice_from_raw_parts(ptr::null(), 3);
+ /// assert_eq!(slice.as_ptr(), 0 as *const i8);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_ptr(self) -> *const T {
+ // The cast keeps only the data pointer and discards the length metadata.
+ self as *const T
+ }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get)]
+ ///
+ /// let x = &[1, 2, 4] as *const [i32];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked(1), x.as_ptr().add(1));
+ /// }
+ /// ```
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[inline]
+ pub unsafe fn get_unchecked<I>(self, index: I) -> *const I::Output
+ where
+ I: SliceIndex<[T]>,
+ {
+ // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
+ unsafe { index.get_unchecked(self) }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared slice to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// [`as_ref`]: #method.as_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [`NonNull::dangling()`]: NonNull::dangling
+ /// [`pointer::offset`]: ../std/primitive.pointer.html#method.offset
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+ Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
+ }
+ }
+}
+
+// Equality for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialEq for *const T {
+ #[inline]
+ fn eq(&self, other: &*const T) -> bool {
+ // Compares the pointer values themselves; the pointees are never read.
+ *self == *other
+ }
+}
+
+// Pointer equality is a total equivalence relation, so `Eq` is a marker impl.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Eq for *const T {}
+
+// Comparison for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Ord for *const T {
+ #[inline]
+ fn cmp(&self, other: &*const T) -> Ordering {
+ // Map the primitive `<` / `==` pointer comparisons onto the
+ // three-way `Ordering` result.
+ if self < other {
+ Less
+ } else if self == other {
+ Equal
+ } else {
+ Greater
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialOrd for *const T {
+ #[inline]
+ fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
+ // The ordering is total (see the `Ord` impl), so this is always `Some`.
+ Some(self.cmp(other))
+ }
+
+ // The individual comparison operators are overridden to use the
+ // primitive pointer comparisons directly instead of going through
+ // `partial_cmp`.
+ #[inline]
+ fn lt(&self, other: &*const T) -> bool {
+ *self < *other
+ }
+
+ #[inline]
+ fn le(&self, other: &*const T) -> bool {
+ *self <= *other
+ }
+
+ #[inline]
+ fn gt(&self, other: &*const T) -> bool {
+ *self > *other
+ }
+
+ #[inline]
+ fn ge(&self, other: &*const T) -> bool {
+ *self >= *other
+ }
+}
--- /dev/null
+//! Manually manage memory through raw pointers.
+//!
+//! *[See also the pointer primitive types](../../std/primitive.pointer.html).*
+//!
+//! # Safety
+//!
+//! Many functions in this module take raw pointers as arguments and read from
+//! or write to them. For this to be safe, these pointers must be *valid*.
+//! Whether a pointer is valid depends on the operation it is used for
+//! (read or write), and the extent of the memory that is accessed (i.e.,
+//! how many bytes are read/written). Most functions use `*mut T` and `*const T`
+//! to access only a single value, in which case the documentation omits the size
+//! and implicitly assumes it to be `size_of::<T>()` bytes.
+//!
+//! The precise rules for validity are not determined yet. The guarantees that are
+//! provided at this point are very minimal:
+//!
+//! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst].
+//! * All pointers (except for the null pointer) are valid for all operations of
+//! [size zero][zst].
+//! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer
+//! be *dereferenceable*: the memory range of the given size starting at the pointer must all be
+//! within the bounds of a single allocated object. Note that in Rust,
+//! every (stack-allocated) variable is considered a separate allocated object.
+//! * All accesses performed by functions in this module are *non-atomic* in the sense
+//! of [atomic operations] used to synchronize between threads. This means it is
+//! undefined behavior to perform two concurrent accesses to the same location from different
+//! threads unless both accesses only read from memory. Notice that this explicitly
+//! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot
+//! be used for inter-thread synchronization.
+//! * The result of casting a reference to a pointer is valid for as long as the
+//! underlying object is live and no reference (just raw pointers) is used to
+//! access the same memory.
+//!
+//! These axioms, along with careful use of [`offset`] for pointer arithmetic,
+//! are enough to correctly implement many useful things in unsafe code. Stronger guarantees
+//! will be provided eventually, as the [aliasing] rules are being determined. For more
+//! information, see the [book] as well as the section in the reference devoted
+//! to [undefined behavior][ub].
+//!
+//! ## Alignment
+//!
+//! Valid raw pointers as defined above are not necessarily properly aligned (where
+//! "proper" alignment is defined by the pointee type, i.e., `*const T` must be
+//! aligned to `mem::align_of::<T>()`). However, most functions require their
+//! arguments to be properly aligned, and will explicitly state
+//! this requirement in their documentation. Notable exceptions to this are
+//! [`read_unaligned`] and [`write_unaligned`].
+//!
+//! When a function requires proper alignment, it does so even if the access
+//! has size 0, i.e., even if memory is not actually touched. Consider using
+//! [`NonNull::dangling`] in such cases.
+//!
+//! [aliasing]: ../../nomicon/aliasing.html
+//! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer
+//! [ub]: ../../reference/behavior-considered-undefined.html
+//! [zst]: ../../nomicon/exotic-sizes.html#zero-sized-types-zsts
+//! [atomic operations]: crate::sync::atomic
+//! [`offset`]: ../../std/primitive.pointer.html#method.offset
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::hash;
+use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::mem::{self, MaybeUninit};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::copy_nonoverlapping;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::copy;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(inline)]
+pub use crate::intrinsics::write_bytes;
+
+mod non_null;
+#[stable(feature = "nonnull", since = "1.25.0")]
+pub use non_null::NonNull;
+
+mod unique;
+#[unstable(feature = "ptr_internals", issue = "none")]
+pub use unique::Unique;
+
+mod const_ptr;
+mod mut_ptr;
+
+/// Executes the destructor (if any) of the pointed-to value.
+///
+/// This is semantically equivalent to calling [`ptr::read`] and discarding
+/// the result, but has the following advantages:
+///
+/// * It is *required* to use `drop_in_place` to drop unsized types like
+/// trait objects, because they can't be read out onto the stack and
+/// dropped normally.
+///
+/// * It is friendlier to the optimizer to do this over [`ptr::read`] when
+/// dropping manually allocated memory (e.g., in the implementations of
+/// `Box`/`Rc`/`Vec`), as the compiler doesn't need to prove that it's
+/// sound to elide the copy.
+///
+/// * It can be used to drop [pinned] data when `T` is not `repr(packed)`
+/// (pinned data must not be moved before it is dropped).
+///
+/// Unaligned values cannot be dropped in place, they must be copied to an aligned
+/// location first using [`ptr::read_unaligned`]. For packed structs, this move is
+/// done automatically by the compiler. This means the fields of packed structs
+/// are not dropped in-place.
+///
+/// [`ptr::read`]: self::read
+/// [`ptr::read_unaligned`]: self::read_unaligned
+/// [pinned]: crate::pin
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `to_drop` must be [valid] for both reads and writes.
+///
+/// * `to_drop` must be properly aligned.
+///
+/// * The value `to_drop` points to must be valid for dropping, which may mean it must uphold
+/// additional invariants - this is type-dependent.
+///
+/// Additionally, if `T` is not [`Copy`], using the pointed-to value after
+/// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop =
+/// foo` counts as a use because it will cause the value to be dropped
+/// again. [`write()`] can be used to overwrite data without causing it to be
+/// dropped.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Manually remove the last item from a vector:
+///
+/// ```
+/// use std::ptr;
+/// use std::rc::Rc;
+///
+/// let last = Rc::new(1);
+/// let weak = Rc::downgrade(&last);
+///
+/// let mut v = vec![Rc::new(0), last];
+///
+/// unsafe {
+/// // Get a raw pointer to the last element in `v`.
+/// let ptr = &mut v[1] as *mut _;
+/// // Shorten `v` to prevent the last item from being dropped. We do that first,
+/// // to prevent issues if the `drop_in_place` below panics.
+/// v.set_len(1);
+/// // Without a call to `drop_in_place`, the last item would never be dropped,
+/// // and the memory it manages would be leaked.
+/// ptr::drop_in_place(ptr);
+/// }
+///
+/// assert_eq!(v, &[0.into()]);
+///
+/// // Ensure that the last item was dropped.
+/// assert!(weak.upgrade().is_none());
+/// ```
+///
+/// Notice that the compiler performs this copy automatically when dropping packed structs,
+/// i.e., you do not usually have to worry about such issues unless you call `drop_in_place`
+/// manually.
+#[stable(feature = "drop_in_place", since = "1.8.0")]
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler. The apparent infinite recursion is
+ // therefore never executed (hence `allow(unconditional_recursion)`).
+
+ // SAFETY: see comment above
+ unsafe { drop_in_place(to_drop) }
+}
+
+/// Creates a null raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *const i32 = ptr::null();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
+pub const fn null<T>() -> *const T {
+ // The null pointer is the pointer with address 0.
+ 0 as *const T
+}
+
+/// Creates a null mutable raw pointer.
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let p: *mut i32 = ptr::null_mut();
+/// assert!(p.is_null());
+/// ```
+#[inline(always)]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_promotable]
+#[rustc_const_stable(feature = "const_ptr_null", since = "1.32.0")]
+pub const fn null_mut<T>() -> *mut T {
+ // The null pointer is the pointer with address 0.
+ 0 as *mut T
+}
+
+// Union used to pun between a raw slice pointer (`*const [T]` / `*mut [T]`)
+// and its explicit (data, len) representation (`FatPtr<T>`). The SAFETY
+// comments at each use site note that only std may rely on these two views
+// having the same layout.
+#[repr(C)]
+pub(crate) union Repr<T> {
+ pub(crate) rust: *const [T],
+ rust_mut: *mut [T],
+ pub(crate) raw: FatPtr<T>,
+}
+
+// Explicit (data pointer, element count) layout of a raw slice pointer;
+// accessed through the `Repr` union above.
+#[repr(C)]
+pub(crate) struct FatPtr<T> {
+ data: *const T,
+ pub(crate) len: usize,
+}
+
+// Manual impl needed to avoid `T: Clone` bound.
+impl<T> Clone for FatPtr<T> {
+ fn clone(&self) -> Self {
+ // `FatPtr<T>` is `Copy`, so a plain dereference performs the copy.
+ *self
+ }
+}
+
+// Manual impl needed to avoid `T: Copy` bound.
+impl<T> Copy for FatPtr<T> {}
+
+/// Forms a raw slice from a pointer and a length.
+///
+/// The `len` argument is the number of **elements**, not the number of bytes.
+///
+/// This function is safe, but actually using the return value is unsafe.
+/// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
+///
+/// [`slice::from_raw_parts`]: crate::slice::from_raw_parts
+///
+/// # Examples
+///
+/// ```rust
+/// use std::ptr;
+///
+/// // create a slice pointer when starting out with a pointer to the first element
+/// let x = [5, 6, 7];
+/// let raw_pointer = x.as_ptr();
+/// let slice = ptr::slice_from_raw_parts(raw_pointer, 3);
+/// assert_eq!(unsafe { &*slice }[2], 7);
+/// ```
+#[inline]
+#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
+ // No dereference happens here; only the fat pointer itself is built.
+ // SAFETY: Accessing the value from the `Repr` union is safe since *const [T]
+ // and FatPtr have the same memory layouts. Only std can make this
+ // guarantee.
+ unsafe { Repr { raw: FatPtr { data, len } }.rust }
+}
+
+/// Performs the same functionality as [`slice_from_raw_parts`], except that a
+/// raw mutable slice is returned, as opposed to a raw immutable slice.
+///
+/// See the documentation of [`slice_from_raw_parts`] for more details.
+///
+/// This function is safe, but actually using the return value is unsafe.
+/// See the documentation of [`slice::from_raw_parts_mut`] for slice safety requirements.
+///
+/// [`slice::from_raw_parts_mut`]: crate::slice::from_raw_parts_mut
+///
+/// # Examples
+///
+/// ```rust
+/// use std::ptr;
+///
+/// let x = &mut [5, 6, 7];
+/// let raw_pointer = x.as_mut_ptr();
+/// let slice = ptr::slice_from_raw_parts_mut(raw_pointer, 3);
+///
+/// unsafe {
+/// (*slice)[2] = 99; // assign a value at an index in the slice
+/// };
+///
+/// assert_eq!(unsafe { &*slice }[2], 99);
+/// ```
+#[inline]
+#[stable(feature = "slice_from_raw_parts", since = "1.42.0")]
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
+ // No dereference happens here; only the fat pointer itself is built.
+ // SAFETY: Accessing the value from the `Repr` union is safe since *mut [T]
+ // and FatPtr have the same memory layouts
+ unsafe { Repr { raw: FatPtr { data, len } }.rust_mut }
+}
+
+/// Swaps the values at two mutable locations of the same type, without
+/// deinitializing either.
+///
+/// But for the following two exceptions, this function is semantically
+/// equivalent to [`mem::swap`]:
+///
+/// * It operates on raw pointers instead of references. When references are
+/// available, [`mem::swap`] should be preferred.
+///
+/// * The two pointed-to values may overlap. If the values do overlap, then the
+/// overlapping region of memory from `x` will be used. This is demonstrated
+/// in the second example below.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * Both `x` and `y` must be [valid] for both reads and writes.
+///
+/// * Both `x` and `y` must be properly aligned.
+///
+/// Note that even if `T` has size `0`, the pointers must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Swapping two non-overlapping regions:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut array = [0, 1, 2, 3];
+///
+/// let x = array[0..].as_mut_ptr() as *mut [u32; 2]; // this is `array[0..2]`
+/// let y = array[2..].as_mut_ptr() as *mut [u32; 2]; // this is `array[2..4]`
+///
+/// unsafe {
+/// ptr::swap(x, y);
+/// assert_eq!([2, 3, 0, 1], array);
+/// }
+/// ```
+///
+/// Swapping two overlapping regions:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut array = [0, 1, 2, 3];
+///
+/// let x = array[0..].as_mut_ptr() as *mut [u32; 3]; // this is `array[0..3]`
+/// let y = array[1..].as_mut_ptr() as *mut [u32; 3]; // this is `array[1..4]`
+///
+/// unsafe {
+/// ptr::swap(x, y);
+/// // The indices `1..3` of the slice overlap between `x` and `y`.
+/// // Reasonable results would be for them to be `[2, 3]`, so that indices `0..3` are
+/// // `[1, 2, 3]` (matching `y` before the `swap`); or for them to be `[0, 1]`
+/// // so that indices `1..4` are `[0, 1, 2]` (matching `x` before the `swap`).
+/// // This implementation is defined to make the latter choice.
+/// assert_eq!([1, 0, 1, 2], array);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
+ // Give ourselves some scratch space to work with.
+ // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
+ let mut tmp = MaybeUninit::<T>::uninit();
+
+ // Perform the swap
+ // SAFETY: the caller must guarantee that `x` and `y` are
+ // valid for writes and properly aligned. `tmp` cannot be
+ // overlapping either `x` or `y` because `tmp` was just allocated
+ // on the stack as a separate allocated object.
+ unsafe {
+ copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
+ copy(y, x, 1); // `x` and `y` may overlap
+ copy_nonoverlapping(tmp.as_ptr(), y, 1);
+ }
+}
+
+/// Swaps `count * size_of::<T>()` bytes between the two regions of memory
+/// beginning at `x` and `y`. The two regions must *not* overlap.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * Both `x` and `y` must be [valid] for both reads and writes of `count *
+/// size_of::<T>()` bytes.
+///
+/// * Both `x` and `y` must be properly aligned.
+///
+/// * The region of memory beginning at `x` with a size of `count *
+/// size_of::<T>()` bytes must *not* overlap with the region of memory
+/// beginning at `y` with the same size.
+///
+/// Note that even if the effectively copied size (`count * size_of::<T>()`) is `0`,
+/// the pointers must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut x = [1, 2, 3, 4];
+/// let mut y = [7, 8, 9];
+///
+/// unsafe {
+/// ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 2);
+/// }
+///
+/// assert_eq!(x, [7, 8, 3, 4]);
+/// assert_eq!(y, [1, 2, 9]);
+/// ```
+#[inline]
+#[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
+pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
+ // Best-effort misuse detection in debug builds only; this does not
+ // replace the caller's obligations listed above.
+ if cfg!(debug_assertions)
+ && !(is_aligned_and_not_null(x)
+ && is_aligned_and_not_null(y)
+ && is_nonoverlapping(x, y, count))
+ {
+ // Not panicking to keep codegen impact smaller.
+ abort();
+ }
+
+ // Reduce to a byte-wise swap of the whole region.
+ let x = x as *mut u8;
+ let y = y as *mut u8;
+ let len = mem::size_of::<T>() * count;
+ // SAFETY: the caller must guarantee that `x` and `y` are
+ // valid for writes and properly aligned.
+ unsafe { swap_nonoverlapping_bytes(x, y, len) }
+}
+
+#[inline]
+pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+ // For types smaller than the block optimization below,
+ // just swap directly to avoid pessimizing codegen.
+ // (32 bytes is the size of the `Block` used by `swap_nonoverlapping_bytes`.)
+ if mem::size_of::<T>() < 32 {
+ // SAFETY: the caller must guarantee that `x` and `y` are valid
+ // for writes, properly aligned, and non-overlapping.
+ unsafe {
+ let z = read(x);
+ copy_nonoverlapping(y, x, 1);
+ write(y, z);
+ }
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
+ unsafe { swap_nonoverlapping(x, y, 1) };
+ }
+}
+
+#[inline]
+unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+ // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
+ // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
+ // Haswell E processors. LLVM is more able to optimize if we give a struct a
+ // #[repr(simd)], even if we don't actually use this struct directly.
+ //
+ // FIXME repr(simd) broken on emscripten and redox
+ #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox")), repr(simd))]
+ struct Block(u64, u64, u64, u64);
+ struct UnalignedBlock(u64, u64, u64, u64);
+
+ let block_size = mem::size_of::<Block>();
+
+ // Loop through x & y, copying them `Block` at a time
+ // The optimizer should unroll the loop fully for most types
+ // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively
+ let mut i = 0;
+ while i + block_size <= len {
+ // Create some uninitialized memory as scratch space
+ // Declaring `t` here avoids aligning the stack when this loop is unused
+ let mut t = mem::MaybeUninit::<Block>::uninit();
+ let t = t.as_mut_ptr() as *mut u8;
+
+ // SAFETY: As `i < len`, and as the caller must guarantee that `x` and `y` are valid
+ // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
+ // safety contract for `add`.
+ //
+ // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
+ // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
+ unsafe {
+ let x = x.add(i);
+ let y = y.add(i);
+
+ // Swap a block of bytes of x & y, using t as a temporary buffer
+ // This should be optimized into efficient SIMD operations where available
+ copy_nonoverlapping(x, t, block_size);
+ copy_nonoverlapping(y, x, block_size);
+ copy_nonoverlapping(t, y, block_size);
+ }
+ i += block_size;
+ }
+
+ if i < len {
+ // Swap any remaining bytes (fewer than `block_size` of them)
+ let mut t = mem::MaybeUninit::<UnalignedBlock>::uninit();
+ let rem = len - i;
+
+ let t = t.as_mut_ptr() as *mut u8;
+
+ // SAFETY: see previous safety comment.
+ unsafe {
+ let x = x.add(i);
+ let y = y.add(i);
+
+ copy_nonoverlapping(x, t, rem);
+ copy_nonoverlapping(y, x, rem);
+ copy_nonoverlapping(t, y, rem);
+ }
+ }
+}
+
+/// Moves `src` into the pointed `dst`, returning the previous `dst` value.
+///
+/// Neither value is dropped.
+///
+/// This function is semantically equivalent to [`mem::replace`] except that it
+/// operates on raw pointers instead of references. When references are
+/// available, [`mem::replace`] should be preferred.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for both reads and writes.
+///
+/// * `dst` must be properly aligned.
+///
+/// * `dst` must point to a properly initialized value of type `T`.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut rust = vec!['b', 'u', 's', 't'];
+///
+/// // `mem::replace` would have the same effect without requiring the unsafe
+/// // block.
+/// let b = unsafe {
+/// ptr::replace(&mut rust[0], 'r')
+/// };
+///
+/// assert_eq!(b, 'b');
+/// assert_eq!(rust, &['r', 'u', 's', 't']);
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
+ // SAFETY: the caller must guarantee that `dst` is valid to be
+ // cast to a mutable reference (valid for writes, aligned, initialized),
+ // and cannot overlap `src` since `dst` must point to a distinct
+ // allocated object.
+ unsafe {
+ mem::swap(&mut *dst, &mut src); // cannot overlap
+ }
+ // After the swap, `src` holds the old `dst` value.
+ src
+}
+
+/// Reads the value from `src` without moving it. This leaves the
+/// memory in `src` unchanged.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must be properly aligned. Use [`read_unaligned`] if this is not the
+/// case.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let x = 12;
+/// let y = &x as *const i32;
+///
+/// unsafe {
+/// assert_eq!(std::ptr::read(y), 12);
+/// }
+/// ```
+///
+/// Manually implement [`mem::swap`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// fn swap<T>(a: &mut T, b: &mut T) {
+/// unsafe {
+/// // Create a bitwise copy of the value at `a` in `tmp`.
+/// let tmp = ptr::read(a);
+///
+/// // Exiting at this point (either by explicitly returning or by
+/// // calling a function which panics) would cause the value in `tmp` to
+/// // be dropped while the same value is still referenced by `a`. This
+/// // could trigger undefined behavior if `T` is not `Copy`.
+///
+/// // Create a bitwise copy of the value at `b` in `a`.
+/// // This is safe because mutable references cannot alias.
+/// ptr::copy_nonoverlapping(b, a, 1);
+///
+/// // As above, exiting here could trigger undefined behavior because
+/// // the same value is referenced by `a` and `b`.
+///
+/// // Move `tmp` into `b`.
+/// ptr::write(b, tmp);
+///
+/// // `tmp` has been moved (`write` takes ownership of its second argument),
+/// // so nothing is dropped implicitly here.
+/// }
+/// }
+///
+/// let mut foo = "foo".to_owned();
+/// let mut bar = "bar".to_owned();
+///
+/// swap(&mut foo, &mut bar);
+///
+/// assert_eq!(foo, "bar");
+/// assert_eq!(bar, "foo");
+/// ```
+///
+/// ## Ownership of the Returned Value
+///
+/// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`].
+/// If `T` is not [`Copy`], using both the returned value and the value at
+/// `*src` can violate memory safety. Note that assigning to `*src` counts as a
+/// use because it will attempt to drop the value at `*src`.
+///
+/// [`write()`] can be used to overwrite data without causing it to be dropped.
+///
+/// ```
+/// use std::ptr;
+///
+/// let mut s = String::from("foo");
+/// unsafe {
+/// // `s2` now points to the same underlying memory as `s`.
+/// let mut s2: String = ptr::read(&s);
+///
+/// assert_eq!(s2, "foo");
+///
+/// // Assigning to `s2` causes its original value to be dropped. Beyond
+/// // this point, `s` must no longer be used, as the underlying memory has
+/// // been freed.
+/// s2 = String::default();
+/// assert_eq!(s2, "");
+///
+/// // Assigning to `s` would cause the old value to be dropped again,
+/// // resulting in undefined behavior.
+/// // s = String::from("bar"); // ERROR
+///
+/// // `ptr::write` can be used to overwrite a value without dropping it.
+/// ptr::write(&mut s, String::from("bar"));
+/// }
+///
+/// assert_eq!(s, "bar");
+/// ```
+///
+/// [valid]: self#safety
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn read<T>(src: *const T) -> T {
+    // The debug-mode alignment/non-null check is delegated to
+    // `copy_nonoverlapping` below, which asserts on its arguments.
+    let mut tmp = MaybeUninit::<T>::uninit();
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since `copy_nonoverlapping` writes a valid value into `tmp`,
+    // `tmp` is guaranteed to be properly initialized by the time
+    // `assume_init` runs.
+    unsafe {
+        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+        tmp.assume_init()
+    }
+}
+
+/// Reads the value from `src` without moving it. This leaves the
+/// memory in `src` unchanged.
+///
+/// Unlike [`read`], `read_unaligned` works with unaligned pointers.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
+/// value and the value at `*src` can [violate memory safety][read-ownership].
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL.
+///
+/// [read-ownership]: read#ownership-of-the-returned-value
+/// [valid]: self#safety
+///
+/// ## On `packed` structs
+///
+/// It is currently impossible to create raw pointers to unaligned fields
+/// of a packed struct.
+///
+/// Attempting to create a raw pointer to an `unaligned` struct field with
+/// an expression such as `&packed.unaligned as *const FieldType` creates an
+/// intermediate unaligned reference before converting that to a raw pointer.
+/// That this reference is temporary and immediately cast is inconsequential
+/// as the compiler always expects references to be properly aligned.
+/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
+/// *undefined behavior* in your program.
+///
+/// An example of what not to do and how this relates to `read_unaligned` is:
+///
+/// ```no_run
+/// #[repr(packed, C)]
+/// struct Packed {
+/// _padding: u8,
+/// unaligned: u32,
+/// }
+///
+/// let packed = Packed {
+/// _padding: 0x00,
+/// unaligned: 0x01020304,
+/// };
+///
+/// let v = unsafe {
+/// // Here we attempt to take the address of a 32-bit integer which is not aligned.
+/// let unaligned =
+/// // A temporary unaligned reference is created here which results in
+/// // undefined behavior regardless of whether the reference is used or not.
+/// &packed.unaligned
+/// // Casting to a raw pointer doesn't help; the mistake already happened.
+/// as *const u32;
+///
+/// let v = std::ptr::read_unaligned(unaligned);
+///
+/// v
+/// };
+/// ```
+///
+/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
+// FIXME: Update docs based on outcome of RFC #2582 and friends.
+///
+/// # Examples
+///
+/// Read a `usize` value from a byte buffer:
+///
+/// ```
+/// use std::mem;
+///
+/// fn read_usize(x: &[u8]) -> usize {
+/// assert!(x.len() >= mem::size_of::<usize>());
+///
+/// let ptr = x.as_ptr() as *const usize;
+///
+/// unsafe { ptr.read_unaligned() }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
+pub unsafe fn read_unaligned<T>(src: *const T) -> T {
+    // The debug-mode non-null check is delegated to `copy_nonoverlapping`.
+    // (The copy below operates on `u8`, so it imposes no alignment
+    // requirement on `src`.)
+    let mut tmp = MaybeUninit::<T>::uninit();
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since `copy_nonoverlapping` fills `tmp` with a valid value,
+    // it is guaranteed to be properly initialized before `assume_init`.
+    unsafe {
+        // Copy byte-by-byte so `src` may be arbitrarily unaligned.
+        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+        tmp.assume_init()
+    }
+}
+
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
+///
+/// `write` does not drop the contents of `dst`. This is safe, but it could leak
+/// allocations or resources, so care should be taken not to overwrite an object
+/// that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been [`read`] from.
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// * `dst` must be properly aligned. Use [`write_unaligned`] if this is not the
+/// case.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let mut x = 0;
+/// let y = &mut x as *mut i32;
+/// let z = 12;
+///
+/// unsafe {
+/// std::ptr::write(y, z);
+/// assert_eq!(std::ptr::read(y), 12);
+/// }
+/// ```
+///
+/// Manually implement [`mem::swap`]:
+///
+/// ```
+/// use std::ptr;
+///
+/// fn swap<T>(a: &mut T, b: &mut T) {
+/// unsafe {
+/// // Create a bitwise copy of the value at `a` in `tmp`.
+/// let tmp = ptr::read(a);
+///
+/// // Exiting at this point (either by explicitly returning or by
+/// // calling a function which panics) would cause the value in `tmp` to
+/// // be dropped while the same value is still referenced by `a`. This
+/// // could trigger undefined behavior if `T` is not `Copy`.
+///
+/// // Create a bitwise copy of the value at `b` in `a`.
+/// // This is safe because mutable references cannot alias.
+/// ptr::copy_nonoverlapping(b, a, 1);
+///
+/// // As above, exiting here could trigger undefined behavior because
+/// // the same value is referenced by `a` and `b`.
+///
+/// // Move `tmp` into `b`.
+/// ptr::write(b, tmp);
+///
+/// // `tmp` has been moved (`write` takes ownership of its second argument),
+/// // so nothing is dropped implicitly here.
+/// }
+/// }
+///
+/// let mut foo = "foo".to_owned();
+/// let mut bar = "bar".to_owned();
+///
+/// swap(&mut foo, &mut bar);
+///
+/// assert_eq!(foo, "bar");
+/// assert_eq!(bar, "foo");
+/// ```
+#[inline]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub unsafe fn write<T>(dst: *mut T, src: T) {
+    // Debug-only sanity check; a misaligned or null `dst` is UB either way.
+    if cfg!(debug_assertions) && !is_aligned_and_not_null(dst) {
+        // Not panicking to keep codegen impact smaller.
+        abort();
+    }
+    // SAFETY: the caller must uphold the safety contract for `move_val_init`.
+    // The intrinsic moves `src` into `*dst` without reading or dropping the
+    // old contents of `dst`, and without dropping `src` itself.
+    unsafe { intrinsics::move_val_init(&mut *dst, src) }
+}
+
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
+///
+/// Unlike [`write()`], the pointer may be unaligned.
+///
+/// `write_unaligned` does not drop the contents of `dst`. This is safe, but it
+/// could leak allocations or resources, so care should be taken not to overwrite
+/// an object that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been read with [`read_unaligned`].
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL.
+///
+/// [valid]: self#safety
+///
+/// ## On `packed` structs
+///
+/// It is currently impossible to create raw pointers to unaligned fields
+/// of a packed struct.
+///
+/// Attempting to create a raw pointer to an `unaligned` struct field with
+/// an expression such as `&packed.unaligned as *const FieldType` creates an
+/// intermediate unaligned reference before converting that to a raw pointer.
+/// That this reference is temporary and immediately cast is inconsequential
+/// as the compiler always expects references to be properly aligned.
+/// As a result, using `&packed.unaligned as *const FieldType` causes immediate
+/// *undefined behavior* in your program.
+///
+/// An example of what not to do and how this relates to `write_unaligned` is:
+///
+/// ```no_run
+/// #[repr(packed, C)]
+/// struct Packed {
+/// _padding: u8,
+/// unaligned: u32,
+/// }
+///
+/// let v = 0x01020304;
+/// let mut packed: Packed = unsafe { std::mem::zeroed() };
+///
+/// let v = unsafe {
+/// // Here we attempt to take the address of a 32-bit integer which is not aligned.
+/// let unaligned =
+/// // A temporary unaligned reference is created here which results in
+/// // undefined behavior regardless of whether the reference is used or not.
+/// &mut packed.unaligned
+/// // Casting to a raw pointer doesn't help; the mistake already happened.
+/// as *mut u32;
+///
+/// std::ptr::write_unaligned(unaligned, v);
+///
+/// v
+/// };
+/// ```
+///
+/// Accessing unaligned fields directly with e.g. `packed.unaligned` is safe however.
+// FIXME: Update docs based on outcome of RFC #2582 and friends.
+///
+/// # Examples
+///
+/// Write a `usize` value to a byte buffer:
+///
+/// ```
+/// use std::mem;
+///
+/// fn write_usize(x: &mut [u8], val: usize) {
+/// assert!(x.len() >= mem::size_of::<usize>());
+///
+/// let ptr = x.as_mut_ptr() as *mut usize;
+///
+/// unsafe { ptr.write_unaligned(val) }
+/// }
+/// ```
+#[inline]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
+pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
+    // SAFETY: the caller must guarantee that `dst` is valid for writes.
+    // `dst` cannot overlap `src` because the caller has mutable access
+    // to `dst` while `src` is owned by this function.
+    unsafe {
+        // Copy as `u8`s so no alignment is required of `dst`; the
+        // debug-mode non-null check is done inside `copy_nonoverlapping`.
+        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+    }
+    // `src` was moved (bitwise) into `*dst`; forget it so its destructor
+    // does not run — otherwise the value would be dropped twice.
+    mem::forget(src);
+}
+
+/// Performs a volatile read of the value from `src` without moving it. This
+/// leaves the memory in `src` unchanged.
+///
+/// Volatile operations are intended to act on I/O memory, and are guaranteed
+/// to not be elided or reordered by the compiler across other volatile
+/// operations.
+///
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here is subject to change
+/// over time. That being said, the semantics will almost always end up pretty
+/// similar to [C11's definition of volatile][c11].
+///
+/// The compiler shouldn't change the relative order or number of volatile
+/// memory operations. However, volatile memory operations on zero-sized types
+/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
+/// and may be ignored.
+///
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `src` must be [valid] for reads.
+///
+/// * `src` must be properly aligned.
+///
+/// * `src` must point to a properly initialized value of type `T`.
+///
+/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
+/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
+/// value and the value at `*src` can [violate memory safety][read-ownership].
+/// However, storing non-[`Copy`] types in volatile memory is almost certainly
+/// incorrect.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+/// [read-ownership]: read#ownership-of-the-returned-value
+///
+/// Just like in C, whether an operation is volatile has no bearing whatsoever
+/// on questions involving concurrent access from multiple threads. Volatile
+/// accesses behave exactly like non-atomic accesses in that regard. In particular,
+/// a race between a `read_volatile` and any write operation to the same location
+/// is undefined behavior.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let x = 12;
+/// let y = &x as *const i32;
+///
+/// unsafe {
+/// assert_eq!(std::ptr::read_volatile(y), 12);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "volatile", since = "1.9.0")]
+pub unsafe fn read_volatile<T>(src: *const T) -> T {
+    // Debug-only sanity check; a misaligned or null `src` is UB either way.
+    if cfg!(debug_assertions) && !is_aligned_and_not_null(src) {
+        // Not panicking to keep codegen impact smaller.
+        abort();
+    }
+    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
+    unsafe { intrinsics::volatile_load(src) }
+}
+
+/// Performs a volatile write of a memory location with the given value without
+/// reading or dropping the old value.
+///
+/// Volatile operations are intended to act on I/O memory, and are guaranteed
+/// to not be elided or reordered by the compiler across other volatile
+/// operations.
+///
+/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
+/// could leak allocations or resources, so care should be taken not to overwrite
+/// an object that should be dropped.
+///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
+/// # Notes
+///
+/// Rust does not currently have a rigorously and formally defined memory model,
+/// so the precise semantics of what "volatile" means here is subject to change
+/// over time. That being said, the semantics will almost always end up pretty
+/// similar to [C11's definition of volatile][c11].
+///
+/// The compiler shouldn't change the relative order or number of volatile
+/// memory operations. However, volatile memory operations on zero-sized types
+/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
+/// and may be ignored.
+///
+/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
+///
+/// # Safety
+///
+/// Behavior is undefined if any of the following conditions are violated:
+///
+/// * `dst` must be [valid] for writes.
+///
+/// * `dst` must be properly aligned.
+///
+/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned.
+///
+/// [valid]: self#safety
+///
+/// Just like in C, whether an operation is volatile has no bearing whatsoever
+/// on questions involving concurrent access from multiple threads. Volatile
+/// accesses behave exactly like non-atomic accesses in that regard. In particular,
+/// a race between a `write_volatile` and any other operation (reading or writing)
+/// on the same location is undefined behavior.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let mut x = 0;
+/// let y = &mut x as *mut i32;
+/// let z = 12;
+///
+/// unsafe {
+/// std::ptr::write_volatile(y, z);
+/// assert_eq!(std::ptr::read_volatile(y), 12);
+/// }
+/// ```
+#[inline]
+#[stable(feature = "volatile", since = "1.9.0")]
+pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
+    // Debug-only sanity check; a misaligned or null `dst` is UB either way.
+    if cfg!(debug_assertions) && !is_aligned_and_not_null(dst) {
+        // Not panicking to keep codegen impact smaller.
+        abort();
+    }
+    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
+    // `src` is moved into `*dst`; the old contents of `dst` are not dropped.
+    unsafe {
+        intrinsics::volatile_store(dst, src);
+    }
+}
+
+/// Align pointer `p`.
+///
+/// Calculate the offset (in units of elements of size `stride`) that has to be applied
+/// to pointer `p` so that pointer `p` would become aligned to `a`.
+///
+/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic.
+/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated
+/// constants.
+///
+/// If we ever decide to make it possible to call the intrinsic with `a` that is not a
+/// power-of-two, it will probably be more prudent to just change to a naive implementation rather
+/// than trying to adapt this to accommodate that change.
+///
+/// Any questions go to @nagisa.
+#[lang = "align_offset"]
+pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
+    // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
+    // 1, where the method versions of these operations are not inlined.
+    use intrinsics::{
+        unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
+    };
+
+    /// Calculate multiplicative modular inverse of `x` modulo `m`.
+    ///
+    /// This implementation is tailored for `align_offset` and has the following preconditions:
+    ///
+    /// * `m` is a power-of-two;
+    /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
+    ///
+    /// Implementation of this function shall not panic. Ever.
+    #[inline]
+    unsafe fn mod_inv(x: usize, m: usize) -> usize {
+        /// Multiplicative modular inverse table modulo 2⁴ = 16.
+        ///
+        /// Note that this table does not contain values where the inverse does not exist (i.e.,
+        /// for `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
+        const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
+        /// Modulo for which the `INV_TABLE_MOD_16` is intended.
+        const INV_TABLE_MOD: usize = 16;
+        /// INV_TABLE_MOD²
+        const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD;
+
+        // The table stores entries for odd residues only (even values have no
+        // inverse modulo a power of two), hence the `>> 1` when indexing.
+        let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
+        // SAFETY: `m` is required to be a power-of-two, hence non-zero.
+        let m_minus_one = unsafe { unchecked_sub(m, 1) };
+        if m <= INV_TABLE_MOD {
+            table_inverse & m_minus_one
+        } else {
+            // We iterate "up" using the following formula:
+            //
+            // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
+            //
+            // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`.
+            let mut inverse = table_inverse;
+            let mut going_mod = INV_TABLE_MOD_SQUARED;
+            loop {
+                // y = y * (2 - xy) mod n
+                //
+                // Note, that we use wrapping operations here intentionally – the original formula
+                // uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
+                // usize::MAX` instead, because we take the result `mod n` at the end
+                // anyway.
+                inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
+                if going_mod >= m {
+                    return inverse & m_minus_one;
+                }
+                going_mod = wrapping_mul(going_mod, going_mod);
+            }
+        }
+    }
+
+    let stride = mem::size_of::<T>();
+    // SAFETY: `a` is a power-of-two, therefore non-zero.
+    let a_minus_one = unsafe { unchecked_sub(a, 1) };
+    if stride == 1 {
+        // `stride == 1` case can be computed more simply through `-p (mod a)`, but doing so
+        // inhibits LLVM's ability to select instructions like `lea`. Instead we compute
+        //
+        //    round_up_to_next_alignment(p, a) - p
+        //
+        // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
+        // for LLVM to be able to utilize the various optimizations it knows about.
+        return wrapping_sub(
+            wrapping_add(p as usize, a_minus_one) & wrapping_sub(0, a),
+            p as usize,
+        );
+    }
+
+    let pmoda = p as usize & a_minus_one;
+    if pmoda == 0 {
+        // Already aligned. Yay!
+        return 0;
+    } else if stride == 0 {
+        // If the pointer is not aligned, and the element is zero-sized, then no amount of
+        // elements will ever align the pointer.
+        return usize::MAX;
+    }
+
+    let smoda = stride & a_minus_one;
+    // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
+    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
+    // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in an usize.
+    let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
+
+    // SAFETY: gcd is always greater or equal to 1.
+    if p as usize & unsafe { unchecked_sub(gcd, 1) } == 0 {
+        // This branch solves for the following linear congruence equation:
+        //
+        // ` p + so = 0 mod a `
+        //
+        // `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
+        // requested alignment.
+        //
+        // With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
+        // `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
+        //
+        // ` p' + s'o = 0 mod a' `
+        // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
+        //
+        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
+        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
+        // divided by `g`).
+        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
+        // co-prime.
+        //
+        // Furthermore, the result produced by this solution is not "minimal", so it is necessary
+        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just a `a'`.
+
+        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+        // `a`.
+        let a2 = unsafe { unchecked_shr(a, gcdpow) };
+        // SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
+        // in `a` (of which it has exactly one).
+        let a2minus1 = unsafe { unchecked_sub(a2, 1) };
+        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+        // `a`.
+        let s2 = unsafe { unchecked_shr(smoda, gcdpow) };
+        // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
+        // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
+        // always be strictly greater than `(p % a) >> gcdpow`.
+        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(pmoda, gcdpow)) };
+        // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
+        // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
+        return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
+    }
+
+    // Cannot be aligned at all.
+    usize::MAX
+}
+
+/// Compares raw pointers for equality.
+///
+/// This is the same as using the `==` operator, but less generic:
+/// the arguments have to be `*const T` raw pointers,
+/// not anything that implements `PartialEq`.
+///
+/// This can be used to compare `&T` references (which coerce to `*const T` implicitly)
+/// by their address rather than comparing the values they point to
+/// (which is what the `PartialEq for &T` implementation does).
+///
+/// # Examples
+///
+/// ```
+/// use std::ptr;
+///
+/// let five = 5;
+/// let other_five = 5;
+/// let five_ref = &five;
+/// let same_five_ref = &five;
+/// let other_five_ref = &other_five;
+///
+/// assert!(five_ref == same_five_ref);
+/// assert!(ptr::eq(five_ref, same_five_ref));
+///
+/// assert!(five_ref == other_five_ref);
+/// assert!(!ptr::eq(five_ref, other_five_ref));
+/// ```
+///
+/// Slices are also compared by their length (fat pointers):
+///
+/// ```
+/// let a = [1, 2, 3];
+/// assert!(std::ptr::eq(&a[..3], &a[..3]));
+/// assert!(!std::ptr::eq(&a[..2], &a[..3]));
+/// assert!(!std::ptr::eq(&a[0..2], &a[1..3]));
+/// ```
+///
+/// Traits are also compared by their implementation:
+///
+/// ```
+/// #[repr(transparent)]
+/// struct Wrapper { member: i32 }
+///
+/// trait Trait {}
+/// impl Trait for Wrapper {}
+/// impl Trait for i32 {}
+///
+/// let wrapper = Wrapper { member: 10 };
+///
+/// // Pointers have equal addresses.
+/// assert!(std::ptr::eq(
+/// &wrapper as *const Wrapper as *const u8,
+/// &wrapper.member as *const i32 as *const u8
+/// ));
+///
+/// // Objects have equal addresses, but `Trait` has different implementations.
+/// assert!(!std::ptr::eq(
+/// &wrapper as &dyn Trait,
+/// &wrapper.member as &dyn Trait,
+/// ));
+/// assert!(!std::ptr::eq(
+/// &wrapper as &dyn Trait as *const dyn Trait,
+/// &wrapper.member as &dyn Trait as *const dyn Trait,
+/// ));
+///
+/// // Converting the reference to a `*const u8` compares by address.
+/// assert!(std::ptr::eq(
+/// &wrapper as &dyn Trait as *const dyn Trait as *const u8,
+/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8,
+/// ));
+/// ```
+#[stable(feature = "ptr_eq", since = "1.17.0")]
+#[inline]
+pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
+    // Raw-pointer `==` compares addresses, and for fat pointers also the
+    // metadata (slice length / vtable pointer) — it never dereferences.
+    a == b
+}
+
+/// Hash a raw pointer.
+///
+/// This can be used to hash a `&T` reference (which coerces to `*const T` implicitly)
+/// by its address rather than the value it points to
+/// (which is what the `Hash for &T` implementation does).
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::hash_map::DefaultHasher;
+/// use std::hash::{Hash, Hasher};
+/// use std::ptr;
+///
+/// let five = 5;
+/// let five_ref = &five;
+///
+/// let mut hasher = DefaultHasher::new();
+/// ptr::hash(five_ref, &mut hasher);
+/// let actual = hasher.finish();
+///
+/// let mut hasher = DefaultHasher::new();
+/// (five_ref as *const i32).hash(&mut hasher);
+/// let expected = hasher.finish();
+///
+/// assert_eq!(actual, expected);
+/// ```
+#[stable(feature = "ptr_hash", since = "1.35.0")]
+pub fn hash<T: ?Sized, S: hash::Hasher>(hashee: *const T, into: &mut S) {
+    // Feed the pointer value itself (its address, plus metadata for fat
+    // pointers) into the hasher via the raw pointer's `Hash` impl. The call
+    // is fully qualified so no trait import is needed in this scope.
+    crate::hash::Hash::hash(&hashee, into);
+}
+
+// Impls for function pointers
+//
+// `fnptr_impls_safety_abi!` expands to comparison, ordering, hashing, and
+// formatting impls for one concrete function-pointer type `$FnTy`. All of
+// them operate on the pointer's address (via a cast to `usize`), never on
+// the pointed-to function.
+macro_rules! fnptr_impls_safety_abi {
+    ($FnTy: ty, $($Arg: ident),*) => {
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> PartialEq for $FnTy {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                // Compare by address.
+                *self as usize == *other as usize
+            }
+        }
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> Eq for $FnTy {}
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> PartialOrd for $FnTy {
+            #[inline]
+            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+                (*self as usize).partial_cmp(&(*other as usize))
+            }
+        }
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> Ord for $FnTy {
+            #[inline]
+            fn cmp(&self, other: &Self) -> Ordering {
+                (*self as usize).cmp(&(*other as usize))
+            }
+        }
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> hash::Hash for $FnTy {
+            fn hash<HH: hash::Hasher>(&self, state: &mut HH) {
+                // Hash the address, consistent with the `PartialEq` impl above.
+                state.write_usize(*self as usize)
+            }
+        }
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> fmt::Pointer for $FnTy {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                // HACK: The intermediate cast as usize is required for AVR
+                // so that the address space of the source function pointer
+                // is preserved in the final function pointer.
+                //
+                // https://github.com/avr-rust/rust/issues/143
+                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
+            }
+        }
+
+        #[stable(feature = "fnptr_impls", since = "1.4.0")]
+        impl<Ret, $($Arg),*> fmt::Debug for $FnTy {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                // HACK: The intermediate cast as usize is required for AVR
+                // so that the address space of the source function pointer
+                // is preserved in the final function pointer.
+                //
+                // https://github.com/avr-rust/rust/issues/143
+                fmt::Pointer::fmt(&(*self as usize as *const ()), f)
+            }
+        }
+    }
+}
+
+// For a given argument list, instantiate `fnptr_impls_safety_abi!` for every
+// safety/ABI combination: safe/unsafe × "Rust"/"C", plus the C-variadic
+// (`...`) forms, which require at least one named parameter.
+macro_rules! fnptr_impls_args {
+    ($($Arg: ident),+) => {
+        fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+        fnptr_impls_safety_abi! { extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+        fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),+) -> Ret, $($Arg),+ }
+        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+) -> Ret, $($Arg),+ }
+        fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),+ , ...) -> Ret, $($Arg),+ }
+    };
+    () => {
+        // No variadic functions with 0 parameters
+        fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, }
+        fnptr_impls_safety_abi! { extern "C" fn() -> Ret, }
+        fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, }
+        fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, }
+    };
+}
+
+// Instantiate the fn-pointer trait impls for arities 0 through 12 — the
+// arity limit used for these blanket impls.
+fnptr_impls_args! {}
+fnptr_impls_args! { A }
+fnptr_impls_args! { A, B }
+fnptr_impls_args! { A, B, C }
+fnptr_impls_args! { A, B, C, D }
+fnptr_impls_args! { A, B, C, D, E }
+fnptr_impls_args! { A, B, C, D, E, F }
+fnptr_impls_args! { A, B, C, D, E, F, G }
+fnptr_impls_args! { A, B, C, D, E, F, G, H }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K }
+fnptr_impls_args! { A, B, C, D, E, F, G, H, I, J, K, L }
+
+/// Create a `const` raw pointer to a place, without creating an intermediate reference.
+///
+/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
+/// and points to initialized data. For cases where those requirements do not hold,
+/// raw pointers should be used instead. However, `&expr as *const _` creates a reference
+/// before casting it to a raw pointer, and that reference is subject to the same rules
+/// as all other references. This macro can create a raw pointer *without* creating
+/// a reference first.
+///
+/// # Example
+///
+/// ```
+/// #![feature(raw_ref_macros)]
+/// use std::ptr;
+///
+/// #[repr(packed)]
+/// struct Packed {
+/// f1: u8,
+/// f2: u16,
+/// }
+///
+/// let packed = Packed { f1: 1, f2: 2 };
+/// // `&packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
+/// let raw_f2 = ptr::raw_const!(packed.f2);
+/// assert_eq!(unsafe { raw_f2.read_unaligned() }, 2);
+/// ```
+#[unstable(feature = "raw_ref_macros", issue = "73394")]
+#[rustc_macro_transparency = "semitransparent"]
+#[allow_internal_unstable(raw_ref_op)]
+pub macro raw_const($e:expr) {
+    // Expands directly to the `&raw const` operator, which produces a raw
+    // pointer to the place without ever materializing a reference (and thus
+    // without the reference's alignment/validity requirements).
+    &raw const $e
+}
+
+/// Create a `mut` raw pointer to a place, without creating an intermediate reference.
+///
+/// Creating a reference with `&`/`&mut` is only allowed if the pointer is properly aligned
+/// and points to initialized data. For cases where those requirements do not hold,
+/// raw pointers should be used instead. However, `&mut expr as *mut _` creates a reference
+/// before casting it to a raw pointer, and that reference is subject to the same rules
+/// as all other references. This macro can create a raw pointer *without* creating
+/// a reference first.
+///
+/// # Example
+///
+/// ```
+/// #![feature(raw_ref_macros)]
+/// use std::ptr;
+///
+/// #[repr(packed)]
+/// struct Packed {
+/// f1: u8,
+/// f2: u16,
+/// }
+///
+/// let mut packed = Packed { f1: 1, f2: 2 };
+/// // `&mut packed.f2` would create an unaligned reference, and thus be Undefined Behavior!
+/// let raw_f2 = ptr::raw_mut!(packed.f2);
+/// unsafe { raw_f2.write_unaligned(42); }
+/// assert_eq!({packed.f2}, 42); // `{...}` forces copying the field instead of creating a reference.
+/// ```
+#[unstable(feature = "raw_ref_macros", issue = "73394")]
+#[rustc_macro_transparency = "semitransparent"]
+#[allow_internal_unstable(raw_ref_op)]
+pub macro raw_mut($e:expr) {
+    // Expands directly to the `&raw mut` operator, which produces a raw
+    // pointer to the place without ever materializing a `&mut` reference
+    // (and thus without the reference's alignment/validity requirements).
+    &raw mut $e
+}
--- /dev/null
+use super::*;
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics;
+use crate::slice::{self, SliceIndex};
+
+#[lang = "mut_ptr"]
+impl<T: ?Sized> *mut T {
+ /// Returns `true` if the pointer is null.
+ ///
+ /// Note that unsized types have many possible null pointers, as only the
+ /// raw data pointer is considered, not their length, vtable, etc.
+ /// Therefore, two pointers that are null may still not compare equal to
+ /// each other.
+ ///
+ /// ## Behavior during const evaluation
+ ///
+ /// When this function is used during const evaluation, it may return `false` for pointers
+ /// that turn out to be null at runtime. Specifically, when a pointer to some memory
+ /// is offset beyond its bounds in such a way that the resulting pointer is null,
+ /// the function will still return `false`. There is no way for CTFE to know
+ /// the absolute position of that memory, so we cannot tell if the pointer is
+ /// null or not.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// assert!(!ptr.is_null());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ptr_is_null", issue = "74939")]
+ #[inline]
+ pub const fn is_null(self) -> bool {
+ // Compare via a cast to a thin pointer, so fat pointers only
+ // consider their "data" part for null-ness. Using `guaranteed_eq`
+ // keeps this callable in const contexts, but it may spuriously return
+ // `false` during CTFE -- the source of the caveat documented above.
+ (self as *mut u8).guaranteed_eq(null_mut())
+ }
+
+ /// Casts to a pointer of another type.
+ ///
+ /// This is semantically equivalent to `self as *mut U`, but as a method it
+ /// can be chained without parenthesizing the receiver, and it is usable in
+ /// `const` contexts.
+ #[stable(feature = "ptr_cast", since = "1.38.0")]
+ #[rustc_const_stable(feature = "const_ptr_cast", since = "1.38.0")]
+ #[inline]
+ pub const fn cast<U>(self) -> *mut U {
+ self as _
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_ref`]
+ /// must be used instead.
+ ///
+ /// For the mutable counterpart see [`as_mut`].
+ ///
+ /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ /// [`as_mut`]: #method.as_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that they are indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_ref() {
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can
+ /// dereference the pointer directly.
+ ///
+ /// ```
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// let val_back = &*ptr;
+ /// println!("We got back the value: {}!", val_back);
+ /// }
+ /// ```
+ #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+ #[inline]
+ pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
+ // SAFETY: the caller must guarantee that `self` is valid for a
+ // reference if it isn't null.
+ if self.is_null() { None } else { unsafe { Some(&*self) } }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared reference to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_mut`].
+ ///
+ /// [`as_ref`]: #method.as_ref-1
+ /// [`as_uninit_mut`]: #method.as_uninit_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(ptr_as_uninit)]
+ ///
+ /// let ptr: *mut u8 = &mut 10u8 as *mut u8;
+ ///
+ /// unsafe {
+ /// if let Some(val_back) = ptr.as_uninit_ref() {
+ /// println!("We got back the value: {}!", val_back.assume_init());
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_ref<'a>(self) -> Option<&'a MaybeUninit<T>>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) }
+ }
+
+ /// Calculates the offset from a pointer.
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum, **in bytes**, must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_offset`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_offset`]: #method.wrapping_offset
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.offset(1));
+ /// println!("{}", *ptr.offset(2));
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const unsafe fn offset(self, count: isize) -> *mut T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ // The obtained pointer is valid for writes since the caller must
+ // guarantee that it points to the same allocated object as `self`.
+ unsafe { intrinsics::offset(self, count) as *mut T }
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// This operation itself is always safe to call; only *using* the resulting
+ /// pointer may require `unsafe`.
+ ///
+ /// The resulting pointer does not need to be in bounds, but it is
+ /// potentially hazardous to dereference (which requires `unsafe`).
+ ///
+ /// In particular, the resulting pointer remains attached to the same allocated
+ /// object that `self` points to. It may *not* be used to access a
+ /// different allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// In other words, `x.wrapping_offset((y as usize).wrapping_sub(x as usize) / size_of::<T>())`
+ /// is *not* the same as `y`, and dereferencing it is undefined behavior
+ /// unless `x` and `y` point into the same allocated object.
+ ///
+ /// Compared to [`offset`], this method basically delays the requirement of staying
+ /// within the same allocated object: [`offset`] is immediate Undefined Behavior when
+ /// crossing object boundaries; `wrapping_offset` produces a pointer but still leads
+ /// to Undefined Behavior if that pointer is dereferenced. [`offset`] can be optimized
+ /// better and is thus preferable in performance-sensitive code.
+ ///
+ /// If you need to cross object boundaries, cast the pointer to an integer and
+ /// do the arithmetic there.
+ ///
+ /// [`offset`]: #method.offset
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let mut data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *mut u8 = data.as_mut_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_offset(6);
+ ///
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// *ptr = 0;
+ /// }
+ /// ptr = ptr.wrapping_offset(step);
+ /// }
+ /// assert_eq!(&data, &[0, 2, 0, 4, 0]);
+ /// ```
+ #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn wrapping_offset(self, count: isize) -> *mut T
+ where
+ T: Sized,
+ {
+ // SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
+ unsafe { intrinsics::arith_offset(self, count) as *mut T }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique reference to
+ /// the value wrapped in `Some`. If the value may be uninitialized, [`as_uninit_mut`]
+ /// must be used instead.
+ ///
+ /// For the shared counterpart see [`as_ref`].
+ ///
+ /// [`as_uninit_mut`]: #method.as_uninit_mut
+ /// [`as_ref`]: #method.as_ref-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that they are indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// let first_value = unsafe { ptr.as_mut().unwrap() };
+ /// *first_value = 4;
+ /// # assert_eq!(s, [4, 2, 3]);
+ /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
+ /// ```
+ ///
+ /// # Null-unchecked version
+ ///
+ /// If you are sure the pointer can never be null and are looking for some kind of
+ /// `as_mut_unchecked` that returns the `&mut T` instead of `Option<&mut T>`, know that
+ /// you can dereference the pointer directly.
+ ///
+ /// ```
+ /// let mut s = [1, 2, 3];
+ /// let ptr: *mut u32 = s.as_mut_ptr();
+ /// let first_value = unsafe { &mut *ptr };
+ /// *first_value = 4;
+ /// # assert_eq!(s, [4, 2, 3]);
+ /// println!("{:?}", s); // It'll print: "[4, 2, 3]".
+ /// ```
+ #[stable(feature = "ptr_as_ref", since = "1.9.0")]
+ #[inline]
+ pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
+ // SAFETY: the caller must guarantee that `self` is valid for
+ // a mutable reference if it isn't null.
+ if self.is_null() { None } else { unsafe { Some(&mut *self) } }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique reference to
+ /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_ref`].
+ ///
+ /// [`as_mut`]: #method.as_mut
+ /// [`as_uninit_ref`]: #method.as_uninit_ref-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferenceable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_mut<'a>(self) -> Option<&'a mut MaybeUninit<T>>
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ if self.is_null() { None } else { Some(unsafe { &mut *(self as *mut MaybeUninit<T>) }) }
+ }
+
+ /// Returns whether two pointers are guaranteed to be equal.
+ ///
+ /// At runtime this function behaves like `self == other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine equality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be equal.
+ /// But when it returns `true`, the pointers are guaranteed to be equal.
+ ///
+ /// This function is the mirror of [`guaranteed_ne`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_ne`]: #method.guaranteed_ne
+ ///
+ /// The return value may change depending on the compiler version and unsafe code may not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+ /// for performance optimizations where spurious `false` return values by this function do not
+ /// affect the outcome, but just the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_eq(self, other: *mut T) -> bool
+ where
+ T: Sized,
+ {
+ // The casts only change mutability; the intrinsic compares the two
+ // pointers and may conservatively answer `false` when it cannot decide.
+ intrinsics::ptr_guaranteed_eq(self as *const _, other as *const _)
+ }
+
+ /// Returns whether two pointers are guaranteed to be unequal.
+ ///
+ /// At runtime this function behaves like `self != other`.
+ /// However, in some contexts (e.g., compile-time evaluation),
+ /// it is not always possible to determine the inequality of two pointers, so this function may
+ /// spuriously return `false` for pointers that later actually turn out to be unequal.
+ /// But when it returns `true`, the pointers are guaranteed to be unequal.
+ ///
+ /// This function is the mirror of [`guaranteed_eq`], but not its inverse. There are pointer
+ /// comparisons for which both functions return `false`.
+ ///
+ /// [`guaranteed_eq`]: #method.guaranteed_eq
+ ///
+ /// The return value may change depending on the compiler version and unsafe code may not
+ /// rely on the result of this function for soundness. It is suggested to only use this function
+ /// for performance optimizations where spurious `false` return values by this function do not
+ /// affect the outcome, but just the performance.
+ /// The consequences of using this method to make runtime and compile-time code behave
+ /// differently have not been explored. This method should not be used to introduce such
+ /// differences, and it should also not be stabilized before we have a better understanding
+ /// of this issue.
+ #[unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
+ #[inline]
+ pub const fn guaranteed_ne(self, other: *mut T) -> bool
+ where
+ T: Sized,
+ {
+ // Note: this is deliberately a *safe* fn. The body performs no unsafe
+ // operation, and the sibling `guaranteed_eq` (same shape, same intrinsic
+ // family) is safe; the previous `unsafe` qualifier here was a stray
+ // inconsistency with it and with the `*const T` counterpart.
+ intrinsics::ptr_guaranteed_ne(self as *const _, other as *const _)
+ }
+
+ /// Calculates the distance between two pointers. The returned value is in
+ /// units of T: the distance in bytes is divided by `mem::size_of::<T>()`.
+ ///
+ /// This function is the inverse of [`offset`].
+ ///
+ /// [`offset`]: #method.offset-1
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and other pointer must be either in bounds or one
+ /// byte past the end of the same allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// * Both pointers must be *derived from* a pointer to the same object.
+ /// (See below for an example.)
+ ///
+ /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The distance between the pointers, in bytes, must be an exact multiple
+ /// of the size of `T`.
+ ///
+ /// * The distance being in bounds cannot rely on "wrapping around" the address space.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if `T` is a Zero-Sized Type ("ZST").
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut a = [0; 5];
+ /// let ptr1: *mut i32 = &mut a[1];
+ /// let ptr2: *mut i32 = &mut a[3];
+ /// unsafe {
+ /// assert_eq!(ptr2.offset_from(ptr1), 2);
+ /// assert_eq!(ptr1.offset_from(ptr2), -2);
+ /// assert_eq!(ptr1.offset(2), ptr2);
+ /// assert_eq!(ptr2.offset(-2), ptr1);
+ /// }
+ /// ```
+ ///
+ /// *Incorrect* usage:
+ ///
+ /// ```rust,no_run
+ /// let ptr1 = Box::into_raw(Box::new(0u8));
+ /// let ptr2 = Box::into_raw(Box::new(1u8));
+ /// let diff = (ptr2 as isize).wrapping_sub(ptr1 as isize);
+ /// // Make ptr2_other an "alias" of ptr2, but derived from ptr1.
+ /// let ptr2_other = (ptr1 as *mut u8).wrapping_offset(diff);
+ /// assert_eq!(ptr2 as usize, ptr2_other as usize);
+ /// // Since ptr2_other and ptr2 are derived from pointers to different objects,
+ /// // computing their offset is undefined behavior, even though
+ /// // they point to the same address!
+ /// unsafe {
+ /// let zero = ptr2_other.offset_from(ptr2); // Undefined Behavior
+ /// }
+ /// ```
+ #[stable(feature = "ptr_offset_from", since = "1.47.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset_from", issue = "41079")]
+ #[inline]
+ pub const unsafe fn offset_from(self, origin: *const T) -> isize
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset_from`.
+ // Delegating to the `*const T` implementation keeps the two in sync.
+ unsafe { (self as *const T).offset_from(origin) }
+ }
+
+ /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// * The computed offset, **in bytes**, cannot overflow an `isize`.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_add`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_add`]: #method.wrapping_add
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ /// let ptr: *const u8 = s.as_ptr();
+ ///
+ /// unsafe {
+ /// println!("{}", *ptr.add(1) as char);
+ /// println!("{}", *ptr.add(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const unsafe fn add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset(count as isize) }
+ }
+
+ /// Calculates the offset from a pointer (convenience for
+ /// `.offset((count as isize).wrapping_neg())`).
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is Undefined
+ /// Behavior:
+ ///
+ /// * Both the starting and resulting pointer must be either in bounds or one
+ /// byte past the end of the same allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+ ///
+ /// * The offset being in bounds cannot rely on "wrapping around" the address
+ /// space. That is, the infinite-precision sum must fit in a `usize`.
+ ///
+ /// The compiler and standard library generally try to ensure allocations
+ /// never reach a size where an offset is a concern. For instance, `Vec`
+ /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
+ /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+ ///
+ /// Most platforms fundamentally can't even construct such an allocation.
+ /// For instance, no known 64-bit platform can ever serve a request
+ /// for 2<sup>63</sup> bytes due to page-table limitations or splitting the address space.
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for
+ /// more than `isize::MAX` bytes with things like Physical Address
+ /// Extension. As such, memory acquired directly from allocators or memory
+ /// mapped files *may* be too large to handle with this function.
+ ///
+ /// Consider using [`wrapping_sub`] instead if these constraints are
+ /// difficult to satisfy. The only advantage of this method is that it
+ /// enables more aggressive compiler optimizations.
+ ///
+ /// [`wrapping_sub`]: #method.wrapping_sub
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "123";
+ ///
+ /// unsafe {
+ /// let end: *const u8 = s.as_ptr().add(3);
+ /// println!("{}", *end.sub(1) as char);
+ /// println!("{}", *end.sub(2) as char);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const unsafe fn sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `offset`.
+ unsafe { self.offset((count as isize).wrapping_neg()) }
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset(count as isize)`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The resulting pointer does not need to be in bounds, but it is
+ /// potentially hazardous to dereference (which requires `unsafe`).
+ ///
+ /// In particular, the resulting pointer remains attached to the same allocated
+ /// object that `self` points to. It may *not* be used to access a
+ /// different allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// Compared to [`add`], this method basically delays the requirement of staying
+ /// within the same allocated object: [`add`] is immediate Undefined Behavior when
+ /// crossing object boundaries; `wrapping_add` produces a pointer but still leads
+ /// to Undefined Behavior if that pointer is dereferenced. [`add`] can be optimized
+ /// better and is thus preferable in performance-sensitive code.
+ ///
+ /// If you need to cross object boundaries, cast the pointer to an integer and
+ /// do the arithmetic there.
+ ///
+ /// [`add`]: #method.add
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let step = 2;
+ /// let end_rounded_up = ptr.wrapping_add(6);
+ ///
+ /// // This loop prints "1, 3, 5, "
+ /// while ptr != end_rounded_up {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_add(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn wrapping_add(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ // `wrapping_offset` is a safe fn with no preconditions, so neither has this.
+ self.wrapping_offset(count as isize)
+ }
+
+ /// Calculates the offset from a pointer using wrapping arithmetic.
+ /// (convenience for `.wrapping_offset((count as isize).wrapping_neg())`)
+ ///
+ /// `count` is in units of T; e.g., a `count` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The resulting pointer does not need to be in bounds, but it is
+ /// potentially hazardous to dereference (which requires `unsafe`).
+ ///
+ /// In particular, the resulting pointer remains attached to the same allocated
+ /// object that `self` points to. It may *not* be used to access a
+ /// different allocated object. Note that in Rust,
+ /// every (stack-allocated) variable is considered a separate allocated object.
+ ///
+ /// Compared to [`sub`], this method basically delays the requirement of staying
+ /// within the same allocated object: [`sub`] is immediate Undefined Behavior when
+ /// crossing object boundaries; `wrapping_sub` produces a pointer but still leads
+ /// to Undefined Behavior if that pointer is dereferenced. [`sub`] can be optimized
+ /// better and is thus preferable in performance-sensitive code.
+ ///
+ /// If you need to cross object boundaries, cast the pointer to an integer and
+ /// do the arithmetic there.
+ ///
+ /// [`sub`]: #method.sub
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // Iterate using a raw pointer in increments of two elements (backwards)
+ /// let data = [1u8, 2, 3, 4, 5];
+ /// let mut ptr: *const u8 = data.as_ptr();
+ /// let start_rounded_down = ptr.wrapping_sub(2);
+ /// ptr = ptr.wrapping_add(4);
+ /// let step = 2;
+ /// // This loop prints "5, 3, 1, "
+ /// while ptr != start_rounded_down {
+ /// unsafe {
+ /// print!("{}, ", *ptr);
+ /// }
+ /// ptr = ptr.wrapping_sub(step);
+ /// }
+ /// ```
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn wrapping_sub(self, count: usize) -> Self
+ where
+ T: Sized,
+ {
+ self.wrapping_offset((count as isize).wrapping_neg())
+ }
+
+ /// Sets the pointer value to `ptr`.
+ ///
+ /// In case `self` is a (fat) pointer to an unsized type, this operation
+ /// will only affect the pointer part, whereas for (thin) pointers to
+ /// sized types, this has the same effect as a simple assignment.
+ ///
+ /// The resulting pointer will have provenance of `val`, i.e., for a fat
+ /// pointer, this operation is semantically the same as creating a new
+ /// fat pointer with the data pointer value of `val` but the metadata of
+ /// `self`.
+ ///
+ /// # Examples
+ ///
+ /// This function is primarily useful for allowing byte-wise pointer
+ /// arithmetic on potentially fat pointers:
+ ///
+ /// ```
+ /// #![feature(set_ptr_value)]
+ /// # use core::fmt::Debug;
+ /// let mut arr: [i32; 3] = [1, 2, 3];
+ /// let mut ptr = &mut arr[0] as *mut dyn Debug;
+ /// let thin = ptr as *mut u8;
+ /// unsafe {
+ /// ptr = ptr.set_ptr_value(thin.add(8));
+ /// # assert_eq!(*(ptr as *mut i32), 3);
+ /// println!("{:?}", &*ptr); // will print "3"
+ /// }
+ /// ```
+ #[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline]
+ pub fn set_ptr_value(mut self, val: *mut u8) -> Self {
+ let thin = &mut self as *mut *mut T as *mut *mut u8;
+ // SAFETY: In case of a thin pointer, this operation is identical
+ // to a simple assignment. In case of a fat pointer, with the current
+ // fat pointer layout implementation, the first field of such a
+ // pointer is always the data pointer, which is likewise assigned.
+ unsafe { *thin = val };
+ self
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// See [`ptr::read`] for safety concerns and examples.
+ ///
+ /// [`ptr::read`]: ./ptr/fn.read.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn read(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read`.
+ unsafe { read(self) }
+ }
+
+ /// Performs a volatile read of the value from `self` without moving it. This
+ /// leaves the memory in `self` unchanged.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// NOTE(review): volatile accesses are not atomic — concurrent access still
+ /// requires synchronization; confirm against [`ptr::read_volatile`].
+ ///
+ /// See [`ptr::read_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_volatile`]: ./ptr/fn.read_volatile.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn read_volatile(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+ unsafe { read_volatile(self) }
+ }
+
+ /// Reads the value from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// Unlike `read`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::read_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::read_unaligned`]: ./ptr/fn.read_unaligned.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn read_unaligned(self) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `read_unaligned`,
+ // which (per the doc above) permits an unaligned `self`.
+ unsafe { read_unaligned(self) }
+ }
+
+ /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: ./ptr/fn.copy.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_to(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(self, dest, count) } // `self` is the source
+ }
+
+ /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *same* argument order as [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(self, dest, count) } // `self` is the source
+ }
+
+ /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+ /// and destination may overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy`].
+ ///
+ /// See [`ptr::copy`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy`]: ./ptr/fn.copy.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_from(self, src: *const T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy`.
+ unsafe { copy(src, self, count) } // `self` is the destination
+ }
+
+ /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// NOTE: this has the *opposite* argument order of [`ptr::copy_nonoverlapping`].
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns and examples.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: ./ptr/fn.copy_nonoverlapping.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+ unsafe { copy_nonoverlapping(src, self, count) } // `self` is the destination
+ }
+
+ /// Executes the destructor (if any) of the pointed-to value.
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns and examples.
+ ///
+ /// [`ptr::drop_in_place`]: ./ptr/fn.drop_in_place.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn drop_in_place(self) {
+ // Note: unlike the read/write wrappers above, there is no `T: Sized`
+ // bound here — the value is dropped in place, never returned by value.
+ // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+ unsafe { drop_in_place(self) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// See [`ptr::write`] for safety concerns and examples.
+ ///
+ /// [`ptr::write`]: ./ptr/fn.write.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn write(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write`.
+ unsafe { write(self, val) }
+ }
+
+ /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
+ /// bytes of memory starting at `self` to `val`.
+ ///
+ /// See [`ptr::write_bytes`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_bytes`]: ./ptr/fn.write_bytes.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn write_bytes(self, val: u8, count: usize)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+ unsafe { write_bytes(self, val, count) } // every byte of the region becomes `val`
+ }
+
+ /// Performs a volatile write of a memory location with the given value without
+ /// reading or dropping the old value.
+ ///
+ /// Volatile operations are intended to act on I/O memory, and are guaranteed
+ /// to not be elided or reordered by the compiler across other volatile
+ /// operations.
+ ///
+ /// NOTE(review): volatile accesses are not atomic — concurrent access still
+ /// requires synchronization; confirm against [`ptr::write_volatile`].
+ ///
+ /// See [`ptr::write_volatile`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_volatile`]: ./ptr/fn.write_volatile.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn write_volatile(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+ unsafe { write_volatile(self, val) }
+ }
+
+ /// Overwrites a memory location with the given value without reading or
+ /// dropping the old value.
+ ///
+ /// Unlike `write`, the pointer may be unaligned.
+ ///
+ /// See [`ptr::write_unaligned`] for safety concerns and examples.
+ ///
+ /// [`ptr::write_unaligned`]: ./ptr/fn.write_unaligned.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn write_unaligned(self, val: T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `write_unaligned`,
+ // which (per the doc above) permits an unaligned `self`.
+ unsafe { write_unaligned(self, val) }
+ }
+
+ /// Replaces the value at `self` with `src`, returning the old
+ /// value, without dropping either.
+ ///
+ /// See [`ptr::replace`] for safety concerns and examples.
+ ///
+ /// [`ptr::replace`]: ./ptr/fn.replace.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn replace(self, src: T) -> T
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `replace`.
+ // Neither the old nor the new value is dropped here (see doc above).
+ unsafe { replace(self, src) }
+ }
+
+ /// Swaps the values at two mutable locations of the same type, without
+ /// deinitializing either. They may overlap, unlike `mem::swap` which is
+ /// otherwise equivalent.
+ ///
+ /// See [`ptr::swap`] for safety concerns and examples.
+ ///
+ /// [`ptr::swap`]: ./ptr/fn.swap.html
+ #[stable(feature = "pointer_methods", since = "1.26.0")]
+ #[inline]
+ pub unsafe fn swap(self, with: *mut T)
+ where
+ T: Sized,
+ {
+ // SAFETY: the caller must uphold the safety contract for `swap`.
+ unsafe { swap(self, with) } // overlap of `self` and `with` is allowed (see doc above)
+ }
+
+ /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
+ /// `align`.
+ ///
+ /// If it is not possible to align the pointer, the implementation returns
+ /// `usize::MAX`. It is permissible for the implementation to *always*
+ /// return `usize::MAX`. Only your algorithm's performance can depend
+ /// on getting a usable offset here, not its correctness.
+ ///
+ /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be
+ /// used with the `wrapping_add` method.
+ ///
+ /// There are no guarantees whatsoever that offsetting the pointer will not overflow or go
+ /// beyond the allocation that the pointer points into. It is up to the caller to ensure that
+ /// the returned offset is correct in all terms other than alignment.
+ ///
+ /// # Panics
+ ///
+ /// The function panics if `align` is not a power-of-two.
+ ///
+ /// # Examples
+ ///
+ /// Accessing adjacent `u8` as `u16`
+ ///
+ /// ```
+ /// # fn foo(n: usize) {
+ /// # use std::mem::align_of;
+ /// # unsafe {
+ /// let x = [5u8, 6u8, 7u8, 8u8, 9u8];
+ /// let ptr = x.as_ptr().add(n) as *const u8;
+ /// let offset = ptr.align_offset(align_of::<u16>());
+ /// if offset < x.len() - n - 1 {
+ /// let u16_ptr = ptr.add(offset) as *const u16;
+ /// assert_ne!(*u16_ptr, 500);
+ /// } else {
+ /// // while the pointer can be aligned via `offset`, it would point
+ /// // outside the allocation
+ /// }
+ /// # } }
+ /// ```
+ #[stable(feature = "align_offset", since = "1.36.0")]
+ pub fn align_offset(self, align: usize) -> usize
+ where
+ T: Sized,
+ {
+ if !align.is_power_of_two() {
+ panic!("align_offset: align is not a power-of-two");
+ }
+ // SAFETY: `align` has been checked to be a power of 2 above
+ unsafe { align_offset(self, align) } // may conservatively return `usize::MAX` (see doc above)
+ }
+}
+
+#[lang = "mut_slice_ptr"]
+impl<T> *mut [T] {
+ /// Returns the length of a raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the raw slice cannot be cast to a slice
+ /// reference because the pointer is null or unaligned.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_len)]
+ /// use std::ptr;
+ ///
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ pub const fn len(self) -> usize {
+ // SAFETY: this is safe because `*const [T]` and `FatPtr<T>` have the same layout
+ // (and likewise `*mut [T]`, which is what `self` is here).
+ // Only `std` can make this guarantee.
+ unsafe { Repr { rust_mut: self }.raw }.len
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// This is equivalent to casting `self` to `*mut T`, but more type-safe.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get)]
+ /// use std::ptr;
+ ///
+ /// let slice: *mut [i8] = ptr::slice_from_raw_parts_mut(ptr::null_mut(), 3);
+ /// assert_eq!(slice.as_mut_ptr(), 0 as *mut i8);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_mut_ptr(self) -> *mut T {
+ // The cast discards the length metadata, keeping only the data pointer.
+ self as *mut T
+ }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an out-of-bounds index or when `self` is not dereferenceable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get)]
+ ///
+ /// let x = &mut [1, 2, 4] as *mut [i32];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked_mut(1), x.as_mut_ptr().add(1));
+ /// }
+ /// ```
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[inline]
+ pub unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
+ where
+ I: SliceIndex<[T]>,
+ {
+ // SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
+ unsafe { index.get_unchecked_mut(self) }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a shared slice to
+ /// the value wrapped in `Some`. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_slice_mut`].
+ ///
+ /// [`as_ref`]: #method.as_ref-1
+ /// [`as_uninit_slice_mut`]: #method.as_uninit_slice_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [`NonNull::dangling()`]: NonNull::dangling
+ /// [`pointer::offset`]: ../std/primitive.pointer.html#method.offset
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_slice<'a>(self) -> Option<&'a [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
+ Some(unsafe { slice::from_raw_parts(self as *const MaybeUninit<T>, self.len()) })
+ }
+ }
+
+ /// Returns `None` if the pointer is null, or else returns a unique slice to
+ /// the value wrapped in `Some`. In contrast to [`as_mut`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_slice`].
+ ///
+ /// [`as_mut`]: #method.as_mut
+ /// [`as_uninit_slice`]: #method.as_uninit_slice-1
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that *either* the pointer is NULL *or*
+ /// all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+ /// many bytes, and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts_mut`][].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [`NonNull::dangling()`]: NonNull::dangling
+ /// [`pointer::offset`]: ../std/primitive.pointer.html#method.offset
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_slice_mut<'a>(self) -> Option<&'a mut [MaybeUninit<T>]> {
+ if self.is_null() {
+ None
+ } else {
+ // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
+ Some(unsafe { slice::from_raw_parts_mut(self as *mut MaybeUninit<T>, self.len()) })
+ }
+ }
+}
+
+// Equality for pointers
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialEq for *mut T {
+ #[inline]
+ fn eq(&self, other: &*mut T) -> bool {
+ // Delegates to the primitive raw-pointer comparison.
+ *self == *other
+ }
+}
+
+// `==` on raw pointers is a total equivalence relation, so `Eq` can be implemented.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Eq for *mut T {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Ord for *mut T {
+ #[inline]
+ fn cmp(&self, other: &*mut T) -> Ordering {
+ // Map the primitive `<`/`==` pointer comparisons onto the three
+ // `Ordering` variants.
+ if self < other {
+ Less
+ } else if self == other {
+ Equal
+ } else {
+ Greater
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> PartialOrd for *mut T {
+ #[inline]
+ fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
+ // Raw-pointer ordering is total (see the `Ord` impl above), so this is
+ // always `Some`.
+ Some(self.cmp(other))
+ }
+
+ // The individual comparison operators delegate to the primitive
+ // raw-pointer comparisons rather than going through `partial_cmp`.
+ #[inline]
+ fn lt(&self, other: &*mut T) -> bool {
+ *self < *other
+ }
+
+ #[inline]
+ fn le(&self, other: &*mut T) -> bool {
+ *self <= *other
+ }
+
+ #[inline]
+ fn gt(&self, other: &*mut T) -> bool {
+ *self > *other
+ }
+
+ #[inline]
+ fn ge(&self, other: &*mut T) -> bool {
+ *self >= *other
+ }
+}
--- /dev/null
+use crate::cmp::Ordering;
+use crate::convert::From;
+use crate::fmt;
+use crate::hash;
+use crate::marker::Unsize;
+use crate::mem::{self, MaybeUninit};
+use crate::ops::{CoerceUnsized, DispatchFromDyn};
+use crate::ptr::Unique;
+use crate::slice::{self, SliceIndex};
+
+/// `*mut T` but non-zero and covariant.
+///
+/// This is often the correct thing to use when building data structures using
+/// raw pointers, but is ultimately more dangerous to use because of its additional
+/// properties. If you're not sure if you should use `NonNull<T>`, just use `*mut T`!
+///
+/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
+/// is never dereferenced. This is so that enums may use this forbidden value
+/// as a discriminant -- `Option<NonNull<T>>` has the same size as `*mut T`.
+/// However the pointer may still dangle if it isn't dereferenced.
+///
+/// Unlike `*mut T`, `NonNull<T>` is covariant over `T`. If this is incorrect
+/// for your use case, you should include some [`PhantomData`] in your type to
+/// provide invariance, such as `PhantomData<Cell<T>>` or `PhantomData<&'a mut T>`.
+/// Usually this won't be necessary; covariance is correct for most safe abstractions,
+/// such as `Box`, `Rc`, `Arc`, `Vec`, and `LinkedList`. This is the case because they
+/// provide a public API that follows the normal shared XOR mutable rules of Rust.
+///
+/// Notice that `NonNull<T>` has a `From` implementation for `&T`. However, this does
+/// not change the fact that mutating through a (pointer derived from a) shared
+/// reference is undefined behavior unless the mutation happens inside an
+/// [`UnsafeCell<T>`]. The same goes for creating a mutable reference from a shared
+/// reference. When using this `From` implementation without an `UnsafeCell<T>`,
+/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr`
+/// is never used for mutation.
+///
+/// [`PhantomData`]: crate::marker::PhantomData
+/// [`UnsafeCell<T>`]: crate::cell::UnsafeCell
+#[stable(feature = "nonnull", since = "1.25.0")]
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(1)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct NonNull<T: ?Sized> {
+ // Stored as `*const T` so that `NonNull<T>` is covariant over `T` (see doc above).
+ pointer: *const T,
+}
+
+/// `NonNull` pointers are not `Send` because the data they reference may be aliased.
+// N.B., this impl is unnecessary, but should provide better error messages.
+// (Negative impl: explicitly opts out of the `Send` auto trait.)
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> !Send for NonNull<T> {}
+
+/// `NonNull` pointers are not `Sync` because the data they reference may be aliased.
+// N.B., this impl is unnecessary, but should provide better error messages.
+// (Negative impl: explicitly opts out of the `Sync` auto trait.)
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> !Sync for NonNull<T> {}
+
+impl<T: Sized> NonNull<T> {
+ /// Creates a new `NonNull` that is dangling, but well-aligned.
+ ///
+ /// This is useful for initializing types which lazily allocate, like
+ /// `Vec::new` does.
+ ///
+ /// Note that the pointer value may potentially represent a valid pointer to
+ /// a `T`, which means this must not be used as a "not yet initialized"
+ /// sentinel value. Types that lazily allocate must track initialization by
+ /// some other means.
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.32.0")]
+ #[inline]
+ pub const fn dangling() -> Self {
+ // SAFETY: mem::align_of() returns a non-zero usize which is then cast
+ // to a *mut T. Therefore, `ptr` is not null and the conditions for
+ // calling new_unchecked() are respected.
+ unsafe {
+ let ptr = mem::align_of::<T>() as *mut T;
+ NonNull::new_unchecked(ptr)
+ }
+ }
+
+ /// Returns a shared reference to the value. In contrast to [`as_ref`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_mut`].
+ ///
+ /// [`as_ref`]: NonNull::as_ref
+ /// [`as_uninit_mut`]: NonNull::as_uninit_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferencable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// NOTE(review): no named `'a` appears in this signature; the returned lifetime is
+ /// tied to the borrow of `self` — confirm the wording above against the signature.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_ref(&self) -> &MaybeUninit<T> {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &*self.cast().as_ptr() }
+ }
+
+ /// Returns a unique reference to the value. In contrast to [`as_mut`], this does not require
+ /// that the value has to be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_ref`].
+ ///
+ /// [`as_mut`]: NonNull::as_mut
+ /// [`as_uninit_ref`]: NonNull::as_uninit_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferencable" in the sense defined in [the module documentation].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// NOTE(review): no named `'a` appears in this signature; the returned lifetime is
+ /// tied to the borrow of `self` — confirm the wording above against the signature.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[inline]
+ #[unstable(feature = "ptr_as_uninit", issue = "75402")]
+ pub unsafe fn as_uninit_mut(&mut self) -> &mut MaybeUninit<T> {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &mut *self.cast().as_ptr() }
+ }
+}
+
+impl<T: ?Sized> NonNull<T> {
+ /// Creates a new `NonNull`.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null.
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.32.0")]
+ #[inline]
+ pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
+ // SAFETY: the caller must guarantee that `ptr` is non-null.
+ unsafe { NonNull { pointer: ptr as _ } }
+ }
+
+ /// Creates a new `NonNull` if `ptr` is non-null.
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[inline]
+ pub fn new(ptr: *mut T) -> Option<Self> {
+ if !ptr.is_null() {
+ // SAFETY: The pointer is already checked and is not null
+ Some(unsafe { Self::new_unchecked(ptr) })
+ } else {
+ None
+ }
+ }
+
+ /// Acquires the underlying `*mut` pointer.
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[rustc_const_stable(feature = "const_nonnull_as_ptr", since = "1.32.0")]
+ #[inline]
+ pub const fn as_ptr(self) -> *mut T {
+ // The stored pointer is `*const T` (for covariance); cast back to `*mut T`.
+ self.pointer as *mut T
+ }
+
+ /// Returns a shared reference to the value. If the value may be uninitialized, [`as_uninit_ref`]
+ /// must be used instead.
+ ///
+ /// For the mutable counterpart see [`as_mut`].
+ ///
+ /// [`as_uninit_ref`]: NonNull::as_uninit_ref
+ /// [`as_mut`]: NonNull::as_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferencable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// NOTE(review): no named `'a` appears in this signature; the returned lifetime is
+ /// tied to the borrow of `self` — confirm the wording above against the signature.
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that they are indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[inline]
+ pub unsafe fn as_ref(&self) -> &T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a reference.
+ unsafe { &*self.as_ptr() }
+ }
+
+ /// Returns a unique reference to the value. If the value may be uninitialized, [`as_uninit_mut`]
+ /// must be used instead.
+ ///
+ /// For the shared counterpart see [`as_ref`].
+ ///
+ /// [`as_uninit_mut`]: NonNull::as_uninit_mut
+ /// [`as_ref`]: NonNull::as_ref
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be properly aligned.
+ ///
+ /// * It must be "dereferencable" in the sense defined in [the module documentation].
+ ///
+ /// * The pointer must point to an initialized instance of `T`.
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// NOTE(review): no named `'a` appears in this signature; the returned lifetime is
+ /// tied to the borrow of `self` — confirm the wording above against the signature.
+ ///
+ /// This applies even if the result of this method is unused!
+ /// (The part about being initialized is not yet fully decided, but until
+ /// it is, the only safe approach is to ensure that they are indeed initialized.)
+ ///
+ /// [the module documentation]: crate::ptr#safety
+ #[stable(feature = "nonnull", since = "1.25.0")]
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut T {
+ // SAFETY: the caller must guarantee that `self` meets all the
+ // requirements for a mutable reference.
+ unsafe { &mut *self.as_ptr() }
+ }
+
+ /// Casts to a pointer of another type.
+ #[stable(feature = "nonnull_cast", since = "1.27.0")]
+ #[rustc_const_stable(feature = "const_nonnull_cast", since = "1.32.0")]
+ #[inline]
+ pub const fn cast<U>(self) -> NonNull<U> {
+ // SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
+ unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) }
+ }
+}
+
+impl<T> NonNull<[T]> {
+ /// Creates a non-null raw slice from a thin pointer and a length.
+ ///
+ /// The `len` argument is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, but dereferencing the return value is unsafe.
+ /// See the documentation of [`slice::from_raw_parts`] for slice safety requirements.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(nonnull_slice_from_raw_parts)]
+ ///
+ /// use std::ptr::NonNull;
+ ///
+ /// // create a slice pointer when starting out with a pointer to the first element
+ /// let mut x = [5, 6, 7];
+ /// let nonnull_pointer = NonNull::new(x.as_mut_ptr()).unwrap();
+ /// let slice = NonNull::slice_from_raw_parts(nonnull_pointer, 3);
+ /// assert_eq!(unsafe { slice.as_ref()[2] }, 7);
+ /// ```
+ ///
+ /// (Note that this example artificially demonstrates a use of this method,
+ /// but `let slice = NonNull::from(&x[..]);` would be a better way to write code like this.)
+ #[unstable(feature = "nonnull_slice_from_raw_parts", issue = "71941")]
+ #[rustc_const_unstable(feature = "const_nonnull_slice_from_raw_parts", issue = "71941")]
+ #[inline]
+ pub const fn slice_from_raw_parts(data: NonNull<T>, len: usize) -> Self {
+ // SAFETY: `data` is a `NonNull` pointer which is necessarily non-null
+ unsafe { Self::new_unchecked(super::slice_from_raw_parts_mut(data.as_ptr(), len)) }
+ }
+
+ /// Returns the length of a non-null raw slice.
+ ///
+ /// The returned value is the number of **elements**, not the number of bytes.
+ ///
+ /// This function is safe, even when the non-null raw slice cannot be dereferenced to a slice
+ /// because the pointer does not have a valid address.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_len, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.len(), 3);
+ /// ```
+ #[unstable(feature = "slice_ptr_len", issue = "71146")]
+ #[rustc_const_unstable(feature = "const_slice_ptr_len", issue = "71146")]
+ #[inline]
+ pub const fn len(self) -> usize {
+ self.as_ptr().len()
+ }
+
+ /// Returns a non-null pointer to the slice's buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.as_non_null_ptr(), NonNull::new(1 as *mut i8).unwrap());
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_non_null_ptr(self) -> NonNull<T> {
+ // SAFETY: We know `self` is non-null.
+ unsafe { NonNull::new_unchecked(self.as_ptr().as_mut_ptr()) }
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
+ /// assert_eq!(slice.as_mut_ptr(), 1 as *mut i8);
+ /// ```
+ #[inline]
+ #[unstable(feature = "slice_ptr_get", issue = "74265")]
+ #[rustc_const_unstable(feature = "slice_ptr_get", issue = "74265")]
+ pub const fn as_mut_ptr(self) -> *mut T {
+ self.as_non_null_ptr().as_ptr()
+ }
+
+ /// Returns a shared reference to a slice of possibly uninitialized values. In contrast to
+ /// [`as_ref`], this does not require that the value has to be initialized.
+ ///
+ /// For the mutable counterpart see [`as_uninit_slice_mut`].
+ ///
+ /// [`as_ref`]: NonNull::as_ref
+ /// [`as_uninit_slice_mut`]: NonNull::as_uninit_slice_mut
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads for `ptr.len() * mem::size_of::<T>()` many bytes,
+ /// and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get mutated (except inside `UnsafeCell`).
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts`].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    pub unsafe fn as_uninit_slice(&self) -> &[MaybeUninit<T>] {
        // SAFETY: the caller must uphold the safety contract for `as_uninit_slice`.
        // `self.cast()` reinterprets the element type as `MaybeUninit<T>`
        // (same layout), and `self.len()` is the element count carried in the
        // slice pointer's metadata.
        unsafe { slice::from_raw_parts(self.cast().as_ptr(), self.len()) }
    }
+
+ /// Returns a unique reference to a slice of possibly uninitialized values. In contrast to
+ /// [`as_mut`], this does not require that the value has to be initialized.
+ ///
+ /// For the shared counterpart see [`as_uninit_slice`].
+ ///
+ /// [`as_mut`]: NonNull::as_mut
+ /// [`as_uninit_slice`]: NonNull::as_uninit_slice
+ ///
+ /// # Safety
+ ///
+ /// When calling this method, you have to ensure that all of the following is true:
+ ///
+ /// * The pointer must be [valid] for reads and writes for `ptr.len() * mem::size_of::<T>()`
+ /// many bytes, and it must be properly aligned. This means in particular:
+ ///
+ /// * The entire memory range of this slice must be contained within a single allocated object!
+ /// Slices can never span across multiple allocated objects.
+ ///
+ /// * The pointer must be aligned even for zero-length slices. One
+ /// reason for this is that enum layout optimizations may rely on references
+ /// (including slices of any length) being aligned and non-null to distinguish
+ /// them from other data. You can obtain a pointer that is usable as `data`
+ /// for zero-length slices using [`NonNull::dangling()`].
+ ///
+ /// * The total size `ptr.len() * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`].
+ ///
+ /// * You must enforce Rust's aliasing rules, since the returned lifetime `'a` is
+ /// arbitrarily chosen and does not necessarily reflect the actual lifetime of the data.
+ /// In particular, for the duration of this lifetime, the memory the pointer points to must
+ /// not get accessed (read or written) through any other pointer.
+ ///
+ /// This applies even if the result of this method is unused!
+ ///
+ /// See also [`slice::from_raw_parts_mut`].
+ ///
+ /// [valid]: crate::ptr#safety
+ /// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(allocator_api, ptr_as_uninit)]
+ ///
+ /// use std::alloc::{AllocRef, Layout, Global};
+ /// use std::mem::MaybeUninit;
+ /// use std::ptr::NonNull;
+ ///
+ /// let memory: NonNull<[u8]> = Global.alloc(Layout::new::<[u8; 32]>())?;
+ /// // This is safe as `memory` is valid for reads and writes for `memory.len()` many bytes.
+ /// // Note that calling `memory.as_mut()` is not allowed here as the content may be uninitialized.
+ /// # #[allow(unused_variables)]
+ /// let slice: &mut [MaybeUninit<u8>] = unsafe { memory.as_uninit_slice_mut() };
+ /// # Ok::<_, std::alloc::AllocError>(())
+ /// ```
    #[inline]
    #[unstable(feature = "ptr_as_uninit", issue = "75402")]
    pub unsafe fn as_uninit_slice_mut(&self) -> &mut [MaybeUninit<T>] {
        // SAFETY: the caller must uphold the safety contract for `as_uninit_slice_mut`.
        // Same construction as `as_uninit_slice`, but producing a unique
        // reference via `from_raw_parts_mut`.
        unsafe { slice::from_raw_parts_mut(self.cast().as_ptr(), self.len()) }
    }
+
+ /// Returns a raw pointer to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// Calling this method with an out-of-bounds index or when `self` is not dereferencable
+ /// is *[undefined behavior]* even if the resulting pointer is not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_ptr_get, nonnull_slice_from_raw_parts)]
+ /// use std::ptr::NonNull;
+ ///
+ /// let x = &mut [1, 2, 4];
+ /// let x = NonNull::slice_from_raw_parts(NonNull::new(x.as_mut_ptr()).unwrap(), x.len());
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked_mut(1).as_ptr(), x.as_non_null_ptr().as_ptr().add(1));
+ /// }
+ /// ```
    #[unstable(feature = "slice_ptr_get", issue = "74265")]
    #[inline]
    pub unsafe fn get_unchecked_mut<I>(self, index: I) -> NonNull<I::Output>
    where
        I: SliceIndex<[T]>,
    {
        // Delegates to the raw slice pointer's `get_unchecked_mut`, then
        // re-wraps the result in `NonNull`.
        // SAFETY: the caller ensures that `self` is dereferencable and `index` in-bounds.
        // As a consequence, the resulting pointer cannot be NULL.
        unsafe { NonNull::new_unchecked(self.as_ptr().get_unchecked_mut(index)) }
    }
+}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Clone for NonNull<T> {
    #[inline]
    fn clone(&self) -> Self {
        // `NonNull` is `Copy`, so cloning is just a bitwise copy of the pointer.
        *self
    }
}
+
// `NonNull` is a plain pointer wrapper, so it can be copied freely.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Copy for NonNull<T> {}
+
// Allow unsizing coercions such as `NonNull<[T; N]>` -> `NonNull<[T]>`.
#[unstable(feature = "coerce_unsized", issue = "27732")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
// Allow `NonNull<Self>` to be used as a method receiver for dynamic dispatch.
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> fmt::Debug for NonNull<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> fmt::Pointer for NonNull<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Format the underlying raw pointer's address.
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
+
// `PartialEq` for `NonNull` is a total equivalence on addresses, so `Eq` holds.
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Eq for NonNull<T> {}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> PartialEq for NonNull<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.as_ptr() == other.as_ptr()
+ }
+}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> Ord for NonNull<T> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        // Order by the raw pointer value (i.e. by address).
        self.as_ptr().cmp(&other.as_ptr())
    }
}
+
+#[stable(feature = "nonnull", since = "1.25.0")]
+impl<T: ?Sized> PartialOrd for NonNull<T> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.as_ptr().partial_cmp(&other.as_ptr())
+ }
+}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> hash::Hash for NonNull<T> {
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        // Hash the address, consistent with `PartialEq` comparing addresses.
        self.as_ptr().hash(state)
    }
}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> From<Unique<T>> for NonNull<T> {
    #[inline]
    fn from(unique: Unique<T>) -> Self {
        // SAFETY: A Unique pointer cannot be null, so the conditions for
        // new_unchecked() are respected. The address is carried over as-is.
        unsafe { NonNull::new_unchecked(unique.as_ptr()) }
    }
}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&mut T> for NonNull<T> {
    #[inline]
    fn from(reference: &mut T) -> Self {
        // SAFETY: A mutable reference cannot be null.
        // NOTE(review): constructing `NonNull` via struct literal requires
        // `unsafe` — presumably because of a scalar-valid-range attribute on
        // the struct declaration (not visible here); confirm at the definition.
        unsafe { NonNull { pointer: reference as *mut T } }
    }
}
+
#[stable(feature = "nonnull", since = "1.25.0")]
impl<T: ?Sized> From<&T> for NonNull<T> {
    #[inline]
    fn from(reference: &T) -> Self {
        // SAFETY: A reference cannot be null, so the conditions for
        // new_unchecked() are respected. (The struct-literal construction
        // requires `unsafe` for the same layout-invariant reason as the
        // `From<&mut T>` impl.)
        unsafe { NonNull { pointer: reference as *const T } }
    }
}
--- /dev/null
+use crate::convert::From;
+use crate::fmt;
+use crate::marker::{PhantomData, Unsize};
+use crate::mem;
+use crate::ops::{CoerceUnsized, DispatchFromDyn};
+
+/// A wrapper around a raw non-null `*mut T` that indicates that the possessor
+/// of this wrapper owns the referent. Useful for building abstractions like
+/// `Box<T>`, `Vec<T>`, `String`, and `HashMap<K, V>`.
+///
+/// Unlike `*mut T`, `Unique<T>` behaves "as if" it were an instance of `T`.
+/// It implements `Send`/`Sync` if `T` is `Send`/`Sync`. It also implies
+/// the kind of strong aliasing guarantees an instance of `T` can expect:
+/// the referent of the pointer should not be modified without a unique path to
+/// its owning Unique.
+///
+/// If you're uncertain of whether it's correct to use `Unique` for your purposes,
+/// consider using `NonNull`, which has weaker semantics.
+///
+/// Unlike `*mut T`, the pointer must always be non-null, even if the pointer
+/// is never dereferenced. This is so that enums may use this forbidden value
+/// as a discriminant -- `Option<Unique<T>>` has the same size as `Unique<T>`.
+/// However the pointer may still dangle if it isn't dereferenced.
+///
+/// Unlike `*mut T`, `Unique<T>` is covariant over `T`. This should always be correct
+/// for any type which upholds Unique's aliasing requirements.
#[unstable(
    feature = "ptr_internals",
    issue = "none",
    reason = "use `NonNull` instead and consider `PhantomData<T>` \
              (if you also use `#[may_dangle]`), `Send`, and/or `Sync`"
)]
#[doc(hidden)]
#[repr(transparent)]
#[rustc_layout_scalar_valid_range_start(1)]
pub struct Unique<T: ?Sized> {
    // The wrapped pointer. Stored as `*const T` so that `Unique` is covariant
    // over `T` (see the type-level docs); `as_ptr` casts back to `*mut T`.
    // The scalar-valid-range attribute above declares the value is never 0,
    // which is what makes `Option<Unique<T>>` the same size as `Unique<T>`.
    pointer: *const T,
    // NOTE: this marker has no consequences for variance, but is necessary
    // for dropck to understand that we logically own a `T`.
    //
    // For details, see:
    // https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
    _marker: PhantomData<T>,
}
+
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
// SAFETY: soundness relies on the owning abstraction upholding the
// uniqueness invariant documented on `Unique` itself.
#[unstable(feature = "ptr_internals", issue = "none")]
unsafe impl<T: Send + ?Sized> Send for Unique<T> {}
+
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
// SAFETY: soundness relies on the owning abstraction upholding the
// uniqueness invariant documented on `Unique` itself.
#[unstable(feature = "ptr_internals", issue = "none")]
unsafe impl<T: Sync + ?Sized> Sync for Unique<T> {}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: Sized> Unique<T> {
    /// Creates a new `Unique` that is dangling, but well-aligned.
    ///
    /// This is useful for initializing types which lazily allocate, like
    /// `Vec::new` does.
    ///
    /// Note that the pointer value may potentially represent a valid pointer to
    /// a `T`, which means this must not be used as a "not yet initialized"
    /// sentinel value. Types that lazily allocate must track initialization by
    /// some other means.
    #[inline]
    pub const fn dangling() -> Self {
        // SAFETY: `mem::align_of::<T>()` is always at least 1, so casting it
        // to a pointer yields a non-null, well-aligned address. The
        // conditions to call `new_unchecked()` are thus respected.
        unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) }
    }
}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> Unique<T> {
    /// Creates a new `Unique`.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[inline]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        // SAFETY: the caller must guarantee that `ptr` is non-null.
        // The field is declared `*const T` (for covariance), hence the cast.
        unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }
    }

    /// Creates a new `Unique` if `ptr` is non-null; returns `None` otherwise.
    #[inline]
    pub fn new(ptr: *mut T) -> Option<Self> {
        if !ptr.is_null() {
            // SAFETY: The pointer has already been checked and is not null.
            Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } })
        } else {
            None
        }
    }

    /// Acquires the underlying `*mut` pointer.
    #[inline]
    pub const fn as_ptr(self) -> *mut T {
        // Cast away the `*const` used for variance; see the field definition.
        self.pointer as *mut T
    }

    /// Dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
    #[inline]
    pub unsafe fn as_ref(&self) -> &T {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a reference (points to an initialized `T`,
        // properly aligned, aliasing rules upheld for the returned lifetime).
        unsafe { &*self.as_ptr() }
    }

    /// Mutably dereferences the content.
    ///
    /// The resulting lifetime is bound to self so this behaves "as if"
    /// it were actually an instance of T that is getting borrowed. If a longer
    /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
    #[inline]
    pub unsafe fn as_mut(&mut self) -> &mut T {
        // SAFETY: the caller must guarantee that `self` meets all the
        // requirements for a mutable reference.
        unsafe { &mut *self.as_ptr() }
    }

    /// Casts to a pointer of another type.
    #[inline]
    pub const fn cast<U>(self) -> Unique<U> {
        // SAFETY: `self.as_ptr()` comes from an existing `Unique`, which is
        // always non-null, and the pointer cast preserves the address, so the
        // non-null contract of `new_unchecked()` is upheld.
        unsafe { Unique::new_unchecked(self.as_ptr() as *mut U) }
    }
}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> Clone for Unique<T> {
    #[inline]
    fn clone(&self) -> Self {
        // `Unique` is `Copy`, so cloning is just a bitwise copy of the pointer.
        *self
    }
}
+
// `Unique` is a plain pointer wrapper, so it can be copied freely.
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> Copy for Unique<T> {}
+
// Allow unsizing coercions such as `Unique<[T; N]>` -> `Unique<[T]>`.
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
// Allow `Unique<Self>` to be used as a method receiver for dynamic dispatch.
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[unstable(feature = "ptr_internals", issue = "none")]
+impl<T: ?Sized> fmt::Debug for Unique<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.as_ptr(), f)
+ }
+}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> fmt::Pointer for Unique<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Format the underlying raw pointer's address.
        fmt::Pointer::fmt(&self.as_ptr(), f)
    }
}
+
#[unstable(feature = "ptr_internals", issue = "none")]
impl<T: ?Sized> From<&mut T> for Unique<T> {
    #[inline]
    fn from(reference: &mut T) -> Self {
        // SAFETY: A mutable reference cannot be null, so the value satisfies
        // the non-null layout invariant declared on `Unique`.
        unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } }
    }
}
--- /dev/null
+#![allow(missing_docs)]
+#![unstable(feature = "raw", issue = "27751")]
+
+//! Contains struct definitions for the layout of compiler built-in types.
+//!
+//! They can be used as targets of transmutes in unsafe code for manipulating
+//! the raw representations directly.
+//!
+//! Their definition should always match the ABI defined in
+//! `rustc_middle::ty::layout`.
+
+/// The representation of a trait object like `&dyn SomeTrait`.
+///
+/// This struct has the same layout as types like `&dyn SomeTrait` and
+/// `Box<dyn AnotherTrait>`.
+///
+/// `TraitObject` is guaranteed to match layouts, but it is not the
+/// type of trait objects (e.g., the fields are not directly accessible
+/// on a `&dyn SomeTrait`) nor does it control that layout (changing the
+/// definition will not change the layout of a `&dyn SomeTrait`). It is
+/// only designed to be used by unsafe code that needs to manipulate
+/// the low-level details.
+///
+/// There is no way to refer to all trait objects generically, so the only
+/// way to create values of this type is with functions like
+/// [`std::mem::transmute`][transmute]. Similarly, the only way to create a true
+/// trait object from a `TraitObject` value is with `transmute`.
+///
+/// [transmute]: crate::intrinsics::transmute
+///
+/// Synthesizing a trait object with mismatched types—one where the
+/// vtable does not correspond to the type of the value to which the
+/// data pointer points—is highly likely to lead to undefined
+/// behavior.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(raw)]
+///
+/// use std::{mem, raw};
+///
+/// // an example trait
+/// trait Foo {
+/// fn bar(&self) -> i32;
+/// }
+///
+/// impl Foo for i32 {
+/// fn bar(&self) -> i32 {
+/// *self + 1
+/// }
+/// }
+///
+/// let value: i32 = 123;
+///
+/// // let the compiler make a trait object
+/// let object: &dyn Foo = &value;
+///
+/// // look at the raw representation
+/// let raw_object: raw::TraitObject = unsafe { mem::transmute(object) };
+///
+/// // the data pointer is the address of `value`
+/// assert_eq!(raw_object.data as *const i32, &value as *const _);
+///
+/// let other_value: i32 = 456;
+///
+/// // construct a new object, pointing to a different `i32`, being
+/// // careful to use the `i32` vtable from `object`
+/// let synthesized: &dyn Foo = unsafe {
+/// mem::transmute(raw::TraitObject {
+/// data: &other_value as *const _ as *mut (),
+/// vtable: raw_object.vtable,
+/// })
+/// };
+///
+/// // it should work just as if we had constructed a trait object out of
+/// // `other_value` directly
+/// assert_eq!(synthesized.bar(), 457);
+/// ```
#[repr(C)]
#[derive(Copy, Clone)]
#[allow(missing_debug_implementations)]
pub struct TraitObject {
    // Pointer to the concrete value (the "data pointer").
    pub data: *mut (),
    // Pointer to the vtable for the value's implementation of the trait.
    pub vtable: *mut (),
}
--- /dev/null
+//! Error handling with the `Result` type.
+//!
+//! [`Result<T, E>`][`Result`] is the type used for returning and propagating
+//! errors. It is an enum with the variants, [`Ok(T)`], representing
+//! success and containing a value, and [`Err(E)`], representing error
+//! and containing an error value.
+//!
+//! ```
+//! # #[allow(dead_code)]
+//! enum Result<T, E> {
+//! Ok(T),
+//! Err(E),
+//! }
+//! ```
+//!
+//! Functions return [`Result`] whenever errors are expected and
+//! recoverable. In the `std` crate, [`Result`] is most prominently used
+//! for [I/O](../../std/io/index.html).
+//!
+//! A simple function returning [`Result`] might be
+//! defined and used like so:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum Version { Version1, Version2 }
+//!
+//! fn parse_version(header: &[u8]) -> Result<Version, &'static str> {
+//! match header.get(0) {
+//! None => Err("invalid header length"),
+//! Some(&1) => Ok(Version::Version1),
+//! Some(&2) => Ok(Version::Version2),
+//! Some(_) => Err("invalid version"),
+//! }
+//! }
+//!
+//! let version = parse_version(&[1, 2, 3, 4]);
+//! match version {
+//! Ok(v) => println!("working with version: {:?}", v),
+//! Err(e) => println!("error parsing header: {:?}", e),
+//! }
+//! ```
+//!
+//! Pattern matching on [`Result`]s is clear and straightforward for
+//! simple cases, but [`Result`] comes with some convenience methods
+//! that make working with it more succinct.
+//!
+//! ```
+//! let good_result: Result<i32, i32> = Ok(10);
+//! let bad_result: Result<i32, i32> = Err(10);
+//!
+//! // The `is_ok` and `is_err` methods do what they say.
+//! assert!(good_result.is_ok() && !good_result.is_err());
+//! assert!(bad_result.is_err() && !bad_result.is_ok());
+//!
+//! // `map` consumes the `Result` and produces another.
+//! let good_result: Result<i32, i32> = good_result.map(|i| i + 1);
+//! let bad_result: Result<i32, i32> = bad_result.map(|i| i - 1);
+//!
+//! // Use `and_then` to continue the computation.
+//! let good_result: Result<bool, i32> = good_result.and_then(|i| Ok(i == 11));
+//!
+//! // Use `or_else` to handle the error.
+//! let bad_result: Result<i32, i32> = bad_result.or_else(|i| Ok(i + 20));
+//!
+//! // Consume the result and return the contents with `unwrap`.
+//! let final_awesome_result = good_result.unwrap();
+//! ```
+//!
+//! # Results must be used
+//!
+//! A common problem with using return values to indicate errors is
+//! that it is easy to ignore the return value, thus failing to handle
+//! the error. [`Result`] is annotated with the `#[must_use]` attribute,
+//! which will cause the compiler to issue a warning when a Result
+//! value is ignored. This makes [`Result`] especially useful with
+//! functions that may encounter errors but don't otherwise return a
+//! useful value.
+//!
+//! Consider the [`write_all`] method defined for I/O types
+//! by the [`Write`] trait:
+//!
+//! ```
+//! use std::io;
+//!
+//! trait Write {
+//! fn write_all(&mut self, bytes: &[u8]) -> Result<(), io::Error>;
+//! }
+//! ```
+//!
+//! *Note: The actual definition of [`Write`] uses [`io::Result`], which
+//! is just a synonym for [`Result`]`<T, `[`io::Error`]`>`.*
+//!
+//! This method doesn't produce a value, but the write may
+//! fail. It's crucial to handle the error case, and *not* write
+//! something like this:
+//!
+//! ```no_run
+//! # #![allow(unused_must_use)] // \o/
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//!
+//! let mut file = File::create("valuable_data.txt").unwrap();
+//! // If `write_all` errors, then we'll never know, because the return
+//! // value is ignored.
+//! file.write_all(b"important message");
+//! ```
+//!
+//! If you *do* write that in Rust, the compiler will give you a
+//! warning (by default, controlled by the `unused_must_use` lint).
+//!
+//! You might instead, if you don't want to handle the error, simply
+//! assert success with [`expect`]. This will panic if the
+//! write fails, providing a marginally useful message indicating why:
+//!
+//! ```{.no_run}
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//!
+//! let mut file = File::create("valuable_data.txt").unwrap();
+//! file.write_all(b"important message").expect("failed to write message");
+//! ```
+//!
+//! You might also simply assert success:
+//!
+//! ```{.no_run}
+//! # use std::fs::File;
+//! # use std::io::prelude::*;
+//! # let mut file = File::create("valuable_data.txt").unwrap();
+//! assert!(file.write_all(b"important message").is_ok());
+//! ```
+//!
+//! Or propagate the error up the call stack with [`?`]:
+//!
+//! ```
+//! # use std::fs::File;
+//! # use std::io::prelude::*;
+//! # use std::io;
+//! # #[allow(dead_code)]
+//! fn write_message() -> io::Result<()> {
+//! let mut file = File::create("valuable_data.txt")?;
+//! file.write_all(b"important message")?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! # The question mark operator, `?`
+//!
+//! When writing code that calls many functions that return the
+//! [`Result`] type, the error handling can be tedious. The question mark
+//! operator, [`?`], hides some of the boilerplate of propagating errors
+//! up the call stack.
+//!
+//! It replaces this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//! use std::io;
+//!
+//! struct Info {
+//! name: String,
+//! age: i32,
+//! rating: i32,
+//! }
+//!
+//! fn write_info(info: &Info) -> io::Result<()> {
+//! // Early return on error
+//! let mut file = match File::create("my_best_friends.txt") {
+//! Err(e) => return Err(e),
+//! Ok(f) => f,
+//! };
+//! if let Err(e) = file.write_all(format!("name: {}\n", info.name).as_bytes()) {
+//! return Err(e)
+//! }
+//! if let Err(e) = file.write_all(format!("age: {}\n", info.age).as_bytes()) {
+//! return Err(e)
+//! }
+//! if let Err(e) = file.write_all(format!("rating: {}\n", info.rating).as_bytes()) {
+//! return Err(e)
+//! }
+//! Ok(())
+//! }
+//! ```
+//!
+//! With this:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! use std::fs::File;
+//! use std::io::prelude::*;
+//! use std::io;
+//!
+//! struct Info {
+//! name: String,
+//! age: i32,
+//! rating: i32,
+//! }
+//!
+//! fn write_info(info: &Info) -> io::Result<()> {
+//! let mut file = File::create("my_best_friends.txt")?;
+//! // Early return on error
+//! file.write_all(format!("name: {}\n", info.name).as_bytes())?;
+//! file.write_all(format!("age: {}\n", info.age).as_bytes())?;
+//! file.write_all(format!("rating: {}\n", info.rating).as_bytes())?;
+//! Ok(())
+//! }
+//! ```
+//!
+//! *It's much nicer!*
+//!
+//! Ending the expression with [`?`] will result in the unwrapped
+//! success ([`Ok`]) value, unless the result is [`Err`], in which case
+//! [`Err`] is returned early from the enclosing function.
+//!
//! [`?`] can only be used in functions that return [`Result`] (or another
//! type implementing the `Try` trait, such as [`Option`]), because of the
//! early return of [`Err`] that it provides.
+//!
+//! [`expect`]: Result::expect
+//! [`Write`]: ../../std/io/trait.Write.html
+//! [`write_all`]: ../../std/io/trait.Write.html#method.write_all
+//! [`io::Result`]: ../../std/io/type.Result.html
+//! [`?`]: crate::ops::Try
+//! [`Ok(T)`]: Ok
+//! [`Err(E)`]: Err
+//! [`io::Error`]: ../../std/io/struct.Error.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::iter::{self, FromIterator, FusedIterator, TrustedLen};
+use crate::ops::{self, Deref, DerefMut};
+use crate::{convert, fmt};
+
/// `Result` is a type that represents either success ([`Ok`]) or failure ([`Err`]).
///
/// See the [module documentation](self) for details.
// The `#[lang]` attributes below let the compiler (e.g. the `?` operator's
// desugaring) refer to these variants directly.
#[derive(Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
#[must_use = "this `Result` may be an `Err` variant, which should be handled"]
#[rustc_diagnostic_item = "result_type"]
#[stable(feature = "rust1", since = "1.0.0")]
pub enum Result<T, E> {
    /// Contains the success value
    #[lang = "Ok"]
    #[stable(feature = "rust1", since = "1.0.0")]
    Ok(#[stable(feature = "rust1", since = "1.0.0")] T),

    /// Contains the error value
    #[lang = "Err"]
    #[stable(feature = "rust1", since = "1.0.0")]
    Err(#[stable(feature = "rust1", since = "1.0.0")] E),
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Type implementation
+/////////////////////////////////////////////////////////////////////////////
+
+impl<T, E> Result<T, E> {
+ /////////////////////////////////////////////////////////////////////////
+ // Querying the contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `true` if the result is [`Ok`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<i32, &str> = Ok(-3);
+ /// assert_eq!(x.is_ok(), true);
+ ///
+ /// let x: Result<i32, &str> = Err("Some error message");
+ /// assert_eq!(x.is_ok(), false);
+ /// ```
+ #[must_use = "if you intended to assert that this is ok, consider `.unwrap()` instead"]
+ #[rustc_const_stable(feature = "const_result", since = "1.48.0")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn is_ok(&self) -> bool {
+ matches!(*self, Ok(_))
+ }
+
+ /// Returns `true` if the result is [`Err`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<i32, &str> = Ok(-3);
+ /// assert_eq!(x.is_err(), false);
+ ///
+ /// let x: Result<i32, &str> = Err("Some error message");
+ /// assert_eq!(x.is_err(), true);
+ /// ```
+ #[must_use = "if you intended to assert that this is err, consider `.unwrap_err()` instead"]
+ #[rustc_const_stable(feature = "const_result", since = "1.48.0")]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn is_err(&self) -> bool {
+ !self.is_ok()
+ }
+
+ /// Returns `true` if the result is an [`Ok`] value containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(option_result_contains)]
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.contains(&2), true);
+ ///
+ /// let x: Result<u32, &str> = Ok(3);
+ /// assert_eq!(x.contains(&2), false);
+ ///
+ /// let x: Result<u32, &str> = Err("Some error message");
+ /// assert_eq!(x.contains(&2), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "option_result_contains", issue = "62358")]
+ pub fn contains<U>(&self, x: &U) -> bool
+ where
+ U: PartialEq<T>,
+ {
+ match self {
+ Ok(y) => x == y,
+ Err(_) => false,
+ }
+ }
+
+ /// Returns `true` if the result is an [`Err`] value containing the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_contains_err)]
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.contains_err(&"Some error message"), false);
+ ///
+ /// let x: Result<u32, &str> = Err("Some error message");
+ /// assert_eq!(x.contains_err(&"Some error message"), true);
+ ///
+ /// let x: Result<u32, &str> = Err("Some other error message");
+ /// assert_eq!(x.contains_err(&"Some error message"), false);
+ /// ```
+ #[must_use]
+ #[inline]
+ #[unstable(feature = "result_contains_err", issue = "62358")]
+ pub fn contains_err<F>(&self, f: &F) -> bool
+ where
+ F: PartialEq<E>,
+ {
+ match self {
+ Ok(_) => false,
+ Err(e) => f == e,
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for each variant
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `Result<T, E>` to [`Option<T>`].
+ ///
+ /// Converts `self` into an [`Option<T>`], consuming `self`,
+ /// and discarding the error, if any.
+ ///
+ /// [`Option<T>`]: Option
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.ok(), Some(2));
+ ///
+ /// let x: Result<u32, &str> = Err("Nothing here");
+ /// assert_eq!(x.ok(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ok(self) -> Option<T> {
+ match self {
+ Ok(x) => Some(x),
+ Err(_) => None,
+ }
+ }
+
+ /// Converts from `Result<T, E>` to [`Option<E>`].
+ ///
+ /// Converts `self` into an [`Option<E>`], consuming `self`,
+ /// and discarding the success value, if any.
+ ///
+ /// [`Option<E>`]: Option
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.err(), None);
+ ///
+ /// let x: Result<u32, &str> = Err("Nothing here");
+ /// assert_eq!(x.err(), Some("Nothing here"));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn err(self) -> Option<E> {
+ match self {
+ Ok(_) => None,
+ Err(x) => Some(x),
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Adapter for working with references
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Converts from `&Result<T, E>` to `Result<&T, &E>`.
+ ///
+ /// Produces a new `Result`, containing a reference
+ /// into the original, leaving the original in place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.as_ref(), Ok(&2));
+ ///
+ /// let x: Result<u32, &str> = Err("Error");
+ /// assert_eq!(x.as_ref(), Err(&"Error"));
+ /// ```
    #[inline]
    #[rustc_const_stable(feature = "const_result", since = "1.48.0")]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub const fn as_ref(&self) -> Result<&T, &E> {
        // Borrow the contained value in place, preserving the variant.
        // NOTE(review): explicit `ref` patterns (rather than match ergonomics)
        // — presumably for const-fn compatibility at stabilization; confirm
        // before restyling.
        match *self {
            Ok(ref x) => Ok(x),
            Err(ref x) => Err(x),
        }
    }
+
+ /// Converts from `&mut Result<T, E>` to `Result<&mut T, &mut E>`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn mutate(r: &mut Result<i32, i32>) {
+ /// match r.as_mut() {
+ /// Ok(v) => *v = 42,
+ /// Err(e) => *e = 0,
+ /// }
+ /// }
+ ///
+ /// let mut x: Result<i32, i32> = Ok(2);
+ /// mutate(&mut x);
+ /// assert_eq!(x.unwrap(), 42);
+ ///
+ /// let mut x: Result<i32, i32> = Err(13);
+ /// mutate(&mut x);
+ /// assert_eq!(x.unwrap_err(), 0);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_mut(&mut self) -> Result<&mut T, &mut E> {
+ match *self {
+ Ok(ref mut x) => Ok(x),
+ Err(ref mut x) => Err(x),
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Transforming contained values
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Maps a `Result<T, E>` to `Result<U, E>` by applying a function to a
+ /// contained [`Ok`] value, leaving an [`Err`] value untouched.
+ ///
+ /// This function can be used to compose the results of two functions.
+ ///
+ /// # Examples
+ ///
+ /// Print the numbers on each line of a string multiplied by two.
+ ///
+ /// ```
+ /// let line = "1\n2\n3\n4\n";
+ ///
+ /// for num in line.lines() {
+ /// match num.parse::<i32>().map(|i| i * 2) {
+ /// Ok(n) => println!("{}", n),
+ /// Err(..) => {}
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map<U, F: FnOnce(T) -> U>(self, op: F) -> Result<U, E> {
+ match self {
+ Ok(t) => Ok(op(t)),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Applies a function to the contained value (if [`Ok`]),
+ /// or returns the provided default (if [`Err`]).
+ ///
+ /// Arguments passed to `map_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`map_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`map_or_else`]: Result::map_or_else
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Result<_, &str> = Ok("foo");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 3);
+ ///
+ /// let x: Result<&str, _> = Err("bar");
+ /// assert_eq!(x.map_or(42, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "result_map_or", since = "1.41.0")]
+ pub fn map_or<U, F: FnOnce(T) -> U>(self, default: U, f: F) -> U {
+ match self {
+ Ok(t) => f(t),
+ Err(_) => default,
+ }
+ }
+
+ /// Maps a `Result<T, E>` to `U` by applying a function to a
+ /// contained [`Ok`] value, or a fallback function to a
+ /// contained [`Err`] value.
+ ///
+ /// This function can be used to unpack a successful result
+ /// while handling an error.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let k = 21;
+ ///
+ /// let x : Result<_, &str> = Ok("foo");
+ /// assert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 3);
+ ///
+ /// let x : Result<&str, _> = Err("bar");
+ /// assert_eq!(x.map_or_else(|e| k * 2, |v| v.len()), 42);
+ /// ```
+ #[inline]
+ #[stable(feature = "result_map_or_else", since = "1.41.0")]
+ pub fn map_or_else<U, D: FnOnce(E) -> U, F: FnOnce(T) -> U>(self, default: D, f: F) -> U {
+ match self {
+ Ok(t) => f(t),
+ Err(e) => default(e),
+ }
+ }
+
+ /// Maps a `Result<T, E>` to `Result<T, F>` by applying a function to a
+ /// contained [`Err`] value, leaving an [`Ok`] value untouched.
+ ///
+ /// This function can be used to pass through a successful result while handling
+ /// an error.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn stringify(x: u32) -> String { format!("error code: {}", x) }
+ ///
+ /// let x: Result<u32, u32> = Ok(2);
+ /// assert_eq!(x.map_err(stringify), Ok(2));
+ ///
+ /// let x: Result<u32, u32> = Err(13);
+ /// assert_eq!(x.map_err(stringify), Err("error code: 13".to_string()));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn map_err<F, O: FnOnce(E) -> F>(self, op: O) -> Result<T, F> {
+ match self {
+ Ok(t) => Ok(t),
+ Err(e) => Err(op(e)),
+ }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Iterator constructors
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns an iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(7);
+ /// assert_eq!(x.iter().next(), Some(&7));
+ ///
+ /// let x: Result<u32, &str> = Err("nothing!");
+ /// assert_eq!(x.iter().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ // Borrow the `Ok` payload (if any); an `Err` becomes an empty iterator.
+ Iter { inner: self.as_ref().ok() }
+ }
+
+ /// Returns a mutable iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut x: Result<u32, &str> = Ok(7);
+ /// match x.iter_mut().next() {
+ /// Some(v) => *v = 40,
+ /// None => {},
+ /// }
+ /// assert_eq!(x, Ok(40));
+ ///
+ /// let mut x: Result<u32, &str> = Err("nothing!");
+ /// assert_eq!(x.iter_mut().next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { inner: self.as_mut().ok() }
+ }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Boolean operations on the values, eager and lazy
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns `res` if the result is [`Ok`], otherwise returns the [`Err`] value of `self`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<&str, &str> = Err("late error");
+ /// assert_eq!(x.and(y), Err("late error"));
+ ///
+ /// let x: Result<u32, &str> = Err("early error");
+ /// let y: Result<&str, &str> = Ok("foo");
+ /// assert_eq!(x.and(y), Err("early error"));
+ ///
+ /// let x: Result<u32, &str> = Err("not a 2");
+ /// let y: Result<&str, &str> = Err("late error");
+ /// assert_eq!(x.and(y), Err("not a 2"));
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<&str, &str> = Ok("different result type");
+ /// assert_eq!(x.and(y), Ok("different result type"));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn and<U>(self, res: Result<U, E>) -> Result<U, E> {
+ match self {
+ Ok(_) => res,
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Calls `op` if the result is [`Ok`], otherwise returns the [`Err`] value of `self`.
+ ///
+ /// This function can be used for control flow based on `Result` values.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) }
+ /// fn err(x: u32) -> Result<u32, u32> { Err(x) }
+ ///
+ /// assert_eq!(Ok(2).and_then(sq).and_then(sq), Ok(16));
+ /// assert_eq!(Ok(2).and_then(sq).and_then(err), Err(4));
+ /// assert_eq!(Ok(2).and_then(err).and_then(sq), Err(2));
+ /// assert_eq!(Err(3).and_then(sq).and_then(sq), Err(3));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn and_then<U, F: FnOnce(T) -> Result<U, E>>(self, op: F) -> Result<U, E> {
+ match self {
+ Ok(t) => op(t),
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Returns `res` if the result is [`Err`], otherwise returns the [`Ok`] value of `self`.
+ ///
+ /// Arguments passed to `or` are eagerly evaluated; if you are passing the
+ /// result of a function call, it is recommended to use [`or_else`], which is
+ /// lazily evaluated.
+ ///
+ /// [`or_else`]: Result::or_else
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<u32, &str> = Err("late error");
+ /// assert_eq!(x.or(y), Ok(2));
+ ///
+ /// let x: Result<u32, &str> = Err("early error");
+ /// let y: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.or(y), Ok(2));
+ ///
+ /// let x: Result<u32, &str> = Err("not a 2");
+ /// let y: Result<u32, &str> = Err("late error");
+ /// assert_eq!(x.or(y), Err("late error"));
+ ///
+ /// let x: Result<u32, &str> = Ok(2);
+ /// let y: Result<u32, &str> = Ok(100);
+ /// assert_eq!(x.or(y), Ok(2));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or<F>(self, res: Result<T, F>) -> Result<T, F> {
+ match self {
+ Ok(v) => Ok(v),
+ Err(_) => res,
+ }
+ }
+
+ /// Calls `op` if the result is [`Err`], otherwise returns the [`Ok`] value of `self`.
+ ///
+ /// This function can be used for control flow based on result values.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn sq(x: u32) -> Result<u32, u32> { Ok(x * x) }
+ /// fn err(x: u32) -> Result<u32, u32> { Err(x) }
+ ///
+ /// assert_eq!(Ok(2).or_else(sq).or_else(sq), Ok(2));
+ /// assert_eq!(Ok(2).or_else(err).or_else(sq), Ok(2));
+ /// assert_eq!(Err(3).or_else(sq).or_else(err), Ok(9));
+ /// assert_eq!(Err(3).or_else(err).or_else(err), Err(3));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_else<F, O: FnOnce(E) -> Result<T, F>>(self, op: O) -> Result<T, F> {
+ match self {
+ Ok(t) => Ok(t),
+ Err(e) => op(e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value or a provided default.
+ ///
+ /// Arguments passed to `unwrap_or` are eagerly evaluated; if you are passing
+ /// the result of a function call, it is recommended to use [`unwrap_or_else`],
+ /// which is lazily evaluated.
+ ///
+ /// [`unwrap_or_else`]: Result::unwrap_or_else
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let default = 2;
+ /// let x: Result<u32, &str> = Ok(9);
+ /// assert_eq!(x.unwrap_or(default), 9);
+ ///
+ /// let x: Result<u32, &str> = Err("error");
+ /// assert_eq!(x.unwrap_or(default), default);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or(self, default: T) -> T {
+ match self {
+ Ok(t) => t,
+ Err(_) => default,
+ }
+ }
+
+ /// Returns the contained [`Ok`] value or computes it from a closure.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// fn count(x: &str) -> usize { x.len() }
+ ///
+ /// assert_eq!(Ok(2).unwrap_or_else(count), 2);
+ /// assert_eq!(Err("foo").unwrap_or_else(count), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_or_else<F: FnOnce(E) -> T>(self, op: F) -> T {
+ match self {
+ Ok(t) => t,
+ Err(e) => op(e),
+ }
+ }
+}
+
+// Adapters that turn a `Result` of a reference (`&T` / `&mut T`) into an
+// owned `Result<T, E>`, by `Copy` or by `Clone` respectively.
+impl<T: Copy, E> Result<&T, E> {
+ /// Maps a `Result<&T, E>` to a `Result<T, E>` by copying the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_copied)]
+ /// let val = 12;
+ /// let x: Result<&i32, i32> = Ok(&val);
+ /// assert_eq!(x, Ok(&12));
+ /// let copied = x.copied();
+ /// assert_eq!(copied, Ok(12));
+ /// ```
+ #[unstable(feature = "result_copied", reason = "newly added", issue = "63168")]
+ pub fn copied(self) -> Result<T, E> {
+ self.map(|&t| t)
+ }
+}
+
+impl<T: Copy, E> Result<&mut T, E> {
+ /// Maps a `Result<&mut T, E>` to a `Result<T, E>` by copying the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_copied)]
+ /// let mut val = 12;
+ /// let x: Result<&mut i32, i32> = Ok(&mut val);
+ /// assert_eq!(x, Ok(&mut 12));
+ /// let copied = x.copied();
+ /// assert_eq!(copied, Ok(12));
+ /// ```
+ #[unstable(feature = "result_copied", reason = "newly added", issue = "63168")]
+ pub fn copied(self) -> Result<T, E> {
+ self.map(|&mut t| t)
+ }
+}
+
+impl<T: Clone, E> Result<&T, E> {
+ /// Maps a `Result<&T, E>` to a `Result<T, E>` by cloning the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_cloned)]
+ /// let val = 12;
+ /// let x: Result<&i32, i32> = Ok(&val);
+ /// assert_eq!(x, Ok(&12));
+ /// let cloned = x.cloned();
+ /// assert_eq!(cloned, Ok(12));
+ /// ```
+ #[unstable(feature = "result_cloned", reason = "newly added", issue = "63168")]
+ pub fn cloned(self) -> Result<T, E> {
+ self.map(|t| t.clone())
+ }
+}
+
+impl<T: Clone, E> Result<&mut T, E> {
+ /// Maps a `Result<&mut T, E>` to a `Result<T, E>` by cloning the contents of the
+ /// `Ok` part.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(result_cloned)]
+ /// let mut val = 12;
+ /// let x: Result<&mut i32, i32> = Ok(&mut val);
+ /// assert_eq!(x, Ok(&mut 12));
+ /// let cloned = x.cloned();
+ /// assert_eq!(cloned, Ok(12));
+ /// ```
+ #[unstable(feature = "result_cloned", reason = "newly added", issue = "63168")]
+ pub fn cloned(self) -> Result<T, E> {
+ self.map(|t| t.clone())
+ }
+}
+
+impl<T, E: fmt::Debug> Result<T, E> {
+ /// Returns the contained [`Ok`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Err`], with a panic message including the
+ /// passed message, and the content of the [`Err`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```{.should_panic}
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "result_expect", since = "1.4.0")]
+ pub fn expect(self, msg: &str) -> T {
+ match self {
+ Ok(t) => t,
+ Err(e) => unwrap_failed(msg, &e),
+ }
+ }
+
+ /// Returns the contained [`Ok`] value, consuming the `self` value.
+ ///
+ /// Because this function may panic, its use is generally discouraged.
+ /// Instead, prefer to use pattern matching and handle the [`Err`]
+ /// case explicitly, or call [`unwrap_or`], [`unwrap_or_else`], or
+ /// [`unwrap_or_default`].
+ ///
+ /// [`unwrap_or`]: Result::unwrap_or
+ /// [`unwrap_or_else`]: Result::unwrap_or_else
+ /// [`unwrap_or_default`]: Result::unwrap_or_default
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Err`], with a panic message provided by the
+ /// [`Err`]'s value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(2);
+ /// assert_eq!(x.unwrap(), 2);
+ /// ```
+ ///
+ /// ```{.should_panic}
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// x.unwrap(); // panics with `emergency failure`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap(self) -> T {
+ match self {
+ Ok(t) => t,
+ Err(e) => unwrap_failed("called `Result::unwrap()` on an `Err` value", &e),
+ }
+ }
+}
+
+impl<T: fmt::Debug, E> Result<T, E> {
+ /// Returns the contained [`Err`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Ok`], with a panic message including the
+ /// passed message, and the content of the [`Ok`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```{.should_panic}
+ /// let x: Result<u32, &str> = Ok(10);
+ /// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10`
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "result_expect_err", since = "1.17.0")]
+ pub fn expect_err(self, msg: &str) -> E {
+ match self {
+ Ok(t) => unwrap_failed(msg, &t),
+ Err(e) => e,
+ }
+ }
+
+ /// Returns the contained [`Err`] value, consuming the `self` value.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the value is an [`Ok`], with a custom panic message provided
+ /// by the [`Ok`]'s value.
+ ///
+ /// # Examples
+ ///
+ /// ```{.should_panic}
+ /// let x: Result<u32, &str> = Ok(2);
+ /// x.unwrap_err(); // panics with `2`
+ /// ```
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Err("emergency failure");
+ /// assert_eq!(x.unwrap_err(), "emergency failure");
+ /// ```
+ #[inline]
+ #[track_caller]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn unwrap_err(self) -> E {
+ match self {
+ Ok(t) => unwrap_failed("called `Result::unwrap_err()` on an `Ok` value", &t),
+ Err(e) => e,
+ }
+ }
+}
+
+impl<T: Default, E> Result<T, E> {
+ /// Returns the contained [`Ok`] value or a default.
+ ///
+ /// Consumes the `self` argument then, if [`Ok`], returns the contained
+ /// value, otherwise if [`Err`], returns the default value for that
+ /// type.
+ ///
+ /// # Examples
+ ///
+ /// Converts a string to an integer, turning poorly-formed strings
+ /// into 0 (the default value for integers). [`parse`] converts
+ /// a string to any other type that implements [`FromStr`], returning an
+ /// [`Err`] on error.
+ ///
+ /// ```
+ /// let good_year_from_input = "1909";
+ /// let bad_year_from_input = "190blarg";
+ /// let good_year = good_year_from_input.parse().unwrap_or_default();
+ /// let bad_year = bad_year_from_input.parse().unwrap_or_default();
+ ///
+ /// assert_eq!(1909, good_year);
+ /// assert_eq!(0, bad_year);
+ /// ```
+ ///
+ /// [`parse`]: str::parse
+ /// [`FromStr`]: crate::str::FromStr
+ #[inline]
+ #[stable(feature = "result_unwrap_or_default", since = "1.16.0")]
+ pub fn unwrap_or_default(self) -> T {
+ match self {
+ Ok(x) => x,
+ Err(_) => Default::default(),
+ }
+ }
+}
+
+#[unstable(feature = "unwrap_infallible", reason = "newly added", issue = "61695")]
+impl<T, E: Into<!>> Result<T, E> {
+ /// Returns the contained [`Ok`] value, but never panics.
+ ///
+ /// Unlike [`unwrap`], this method is known to never panic on the
+ /// result types it is implemented for. Therefore, it can be used
+ /// instead of `unwrap` as a maintainability safeguard that will fail
+ /// to compile if the error type of the `Result` is later changed
+ /// to an error that can actually occur.
+ ///
+ /// [`unwrap`]: Result::unwrap
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(never_type)]
+ /// # #![feature(unwrap_infallible)]
+ ///
+ /// fn only_good_news() -> Result<String, !> {
+ /// Ok("this is fine".into())
+ /// }
+ ///
+ /// let s: String = only_good_news().into_ok();
+ /// println!("{}", s);
+ /// ```
+ #[inline]
+ pub fn into_ok(self) -> T {
+ match self {
+ Ok(x) => x,
+ // `E: Into<!>` converts the error to the uninhabited `!` type,
+ // so this arm type-checks but can never actually run.
+ Err(e) => e.into(),
+ }
+ }
+}
+
+impl<T: Deref, E> Result<T, E> {
+ /// Converts from `Result<T, E>` (or `&Result<T, E>`) to `Result<&<T as Deref>::Target, &E>`.
+ ///
+ /// Coerces the [`Ok`] variant of the original [`Result`] via [`Deref`](crate::ops::Deref)
+ /// and returns the new [`Result`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x: Result<String, u32> = Ok("hello".to_string());
+ /// let y: Result<&str, &u32> = Ok("hello");
+ /// assert_eq!(x.as_deref(), y);
+ ///
+ /// let x: Result<String, u32> = Err(42);
+ /// let y: Result<&str, &u32> = Err(&42);
+ /// assert_eq!(x.as_deref(), y);
+ /// ```
+ #[stable(feature = "inner_deref", since = "1.47.0")]
+ pub fn as_deref(&self) -> Result<&T::Target, &E> {
+ // Only the `Ok` side is deref-coerced; the `Err` side is merely re-borrowed.
+ self.as_ref().map(|t| t.deref())
+ }
+}
+
+impl<T: DerefMut, E> Result<T, E> {
+ /// Converts from `Result<T, E>` (or `&mut Result<T, E>`) to `Result<&mut <T as DerefMut>::Target, &mut E>`.
+ ///
+ /// Coerces the [`Ok`] variant of the original [`Result`] via [`DerefMut`](crate::ops::DerefMut)
+ /// and returns the new [`Result`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = "HELLO".to_string();
+ /// let mut x: Result<String, u32> = Ok("hello".to_string());
+ /// let y: Result<&mut str, &mut u32> = Ok(&mut s);
+ /// assert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
+ ///
+ /// let mut i = 42;
+ /// let mut x: Result<String, u32> = Err(42);
+ /// let y: Result<&mut str, &mut u32> = Err(&mut i);
+ /// assert_eq!(x.as_deref_mut().map(|x| { x.make_ascii_uppercase(); x }), y);
+ /// ```
+ #[stable(feature = "inner_deref", since = "1.47.0")]
+ pub fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E> {
+ self.as_mut().map(|t| t.deref_mut())
+ }
+}
+
+impl<T, E> Result<Option<T>, E> {
+ /// Transposes a `Result` of an `Option` into an `Option` of a `Result`.
+ ///
+ /// `Ok(None)` will be mapped to `None`.
+ /// `Ok(Some(_))` and `Err(_)` will be mapped to `Some(Ok(_))` and `Some(Err(_))`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #[derive(Debug, Eq, PartialEq)]
+ /// struct SomeErr;
+ ///
+ /// let x: Result<Option<i32>, SomeErr> = Ok(Some(5));
+ /// let y: Option<Result<i32, SomeErr>> = Some(Ok(5));
+ /// assert_eq!(x.transpose(), y);
+ ///
+ /// let x: Result<Option<i32>, SomeErr> = Ok(None);
+ /// assert_eq!(x.transpose(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "transpose_result", since = "1.33.0")]
+ pub fn transpose(self) -> Option<Result<T, E>> {
+ match self {
+ Ok(Some(x)) => Some(Ok(x)),
+ Ok(None) => None,
+ Err(e) => Some(Err(e)),
+ }
+ }
+}
+
+impl<T, E> Result<Result<T, E>, E> {
+ /// Converts from `Result<Result<T, E>, E>` to `Result<T, E>`
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(result_flattening)]
+ /// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello"));
+ /// assert_eq!(Ok("hello"), x.flatten());
+ ///
+ /// let x: Result<Result<&'static str, u32>, u32> = Ok(Err(6));
+ /// assert_eq!(Err(6), x.flatten());
+ ///
+ /// let x: Result<Result<&'static str, u32>, u32> = Err(6);
+ /// assert_eq!(Err(6), x.flatten());
+ /// ```
+ ///
+ /// Flattening once only removes one level of nesting:
+ ///
+ /// ```
+ /// #![feature(result_flattening)]
+ /// let x: Result<Result<Result<&'static str, u32>, u32>, u32> = Ok(Ok(Ok("hello")));
+ /// assert_eq!(Ok(Ok("hello")), x.flatten());
+ /// assert_eq!(Ok("hello"), x.flatten().flatten());
+ /// ```
+ #[inline]
+ #[unstable(feature = "result_flattening", issue = "70142")]
+ pub fn flatten(self) -> Result<T, E> {
+ self.and_then(convert::identity)
+ }
+}
+
+// This is a separate function to reduce the code size of the methods.
+// `#[track_caller]` forwards the *caller's* source location into the panic,
+// so unwrap/expect failures point at user code rather than at this helper.
+#[inline(never)]
+#[cold]
+#[track_caller]
+fn unwrap_failed(msg: &str, error: &dyn fmt::Debug) -> ! {
+ panic!("{}: {:?}", msg, error)
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// Trait implementations
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, E: Clone> Clone for Result<T, E> {
+ #[inline]
+ fn clone(&self) -> Self {
+ match self {
+ Ok(x) => Ok(x.clone()),
+ Err(x) => Err(x.clone()),
+ }
+ }
+
+ // Manual `clone_from`: when both sides hold the same variant, delegate to
+ // the payload's `clone_from` so existing resources can be reused instead of
+ // allocating a fresh clone.
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (Ok(to), Ok(from)) => to.clone_from(from),
+ (Err(to), Err(from)) => to.clone_from(from),
+ (to, from) => *to = from.clone(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, E> IntoIterator for Result<T, E> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Returns a consuming iterator over the possibly contained value.
+ ///
+ /// The iterator yields one value if the result is [`Result::Ok`], otherwise none.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let x: Result<u32, &str> = Ok(5);
+ /// let v: Vec<u32> = x.into_iter().collect();
+ /// assert_eq!(v, [5]);
+ ///
+ /// let x: Result<u32, &str> = Err("nothing!");
+ /// let v: Vec<u32> = x.into_iter().collect();
+ /// assert_eq!(v, []);
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ // An `Err` value is dropped here; only the `Ok` payload is yielded.
+ IntoIter { inner: self.ok() }
+ }
+}
+
+#[stable(since = "1.4.0", feature = "result_iter")]
+impl<'a, T, E> IntoIterator for &'a Result<T, E> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ // Delegates to [`Result::iter`].
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(since = "1.4.0", feature = "result_iter")]
+impl<'a, T, E> IntoIterator for &'a mut Result<T, E> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ // Delegates to [`Result::iter_mut`].
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// The Result Iterators
+/////////////////////////////////////////////////////////////////////////////
+
+/// An iterator over a reference to the [`Ok`] variant of a [`Result`].
+///
+/// The iterator yields one value if the result is [`Ok`], otherwise none.
+///
+/// Created by [`Result::iter`].
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ inner: Option<&'a T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ // `take` leaves `None` behind, so at most one item is ever yielded.
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+// SAFETY: `size_hint` returns an exact bound `(n, Some(n))` and `next` yields
+// exactly that many items, as `TrustedLen` requires.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for Iter<'_, A> {}
+
+// Manual impl: a `#[derive(Clone)]` would needlessly require `T: Clone`,
+// while the stored `Option<&T>` is copyable for any `T`.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Iter { inner: self.inner }
+ }
+}
+
+/// An iterator over a mutable reference to the [`Ok`] variant of a [`Result`].
+///
+/// Created by [`Result::iter_mut`].
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ inner: Option<&'a mut T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ // `take` leaves `None` behind, so at most one item is ever yielded.
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+// SAFETY: `size_hint` returns an exact bound `(n, Some(n))` and `next` yields
+// exactly that many items, as `TrustedLen` requires.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IterMut<'_, A> {}
+
+/// An iterator over the value in a [`Ok`] variant of a [`Result`].
+///
+/// The iterator yields one value if the result is [`Ok`], otherwise none.
+///
+/// This struct is created by the [`into_iter`] method on
+/// [`Result`] (provided by the [`IntoIterator`] trait).
+///
+/// [`into_iter`]: IntoIterator::into_iter
+#[derive(Clone, Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<T> {
+ inner: Option<T>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ // `take` leaves `None` behind, so at most one item is ever yielded.
+ self.inner.take()
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let n = if self.inner.is_some() { 1 } else { 0 };
+ (n, Some(n))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.inner.take()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+// SAFETY: `size_hint` returns an exact bound `(n, Some(n))` and `next` yields
+// exactly that many items, as `TrustedLen` requires.
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<A> TrustedLen for IntoIter<A> {}
+
+/////////////////////////////////////////////////////////////////////////////
+// FromIterator
+/////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, E, V: FromIterator<A>> FromIterator<Result<A, E>> for Result<V, E> {
+ /// Takes each element in the `Iterator`: if it is an `Err`, no further
+ /// elements are taken, and the `Err` is returned. Should no `Err` occur, a
+ /// container with the values of each `Result` is returned.
+ ///
+ /// Here is an example which increments every integer in a vector,
+ /// checking for overflow:
+ ///
+ /// ```
+ /// let v = vec![1, 2];
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
+ /// x.checked_add(1).ok_or("Overflow!")
+ /// ).collect();
+ /// assert_eq!(res, Ok(vec![2, 3]));
+ /// ```
+ ///
+ /// Here is another example that tries to subtract one from another list
+ /// of integers, this time checking for underflow:
+ ///
+ /// ```
+ /// let v = vec![1, 2, 0];
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
+ /// x.checked_sub(1).ok_or("Underflow!")
+ /// ).collect();
+ /// assert_eq!(res, Err("Underflow!"));
+ /// ```
+ ///
+ /// Here is a variation on the previous example, showing that no
+ /// further elements are taken from `iter` after the first `Err`.
+ ///
+ /// ```
+ /// let v = vec![3, 2, 1, 10];
+ /// let mut shared = 0;
+ /// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32| {
+ /// shared += x;
+ /// x.checked_sub(2).ok_or("Underflow!")
+ /// }).collect();
+ /// assert_eq!(res, Err("Underflow!"));
+ /// assert_eq!(shared, 6);
+ /// ```
+ ///
+ /// Since the third element caused an underflow, no further elements were taken,
+ /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16.
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = Result<A, E>>>(iter: I) -> Result<V, E> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ // `process_results` stops consuming `iter` at the first `Err`, which is
+ // the short-circuiting behavior documented in the examples above.
+ iter::process_results(iter.into_iter(), |i| i.collect())
+ }
+}
+
+// The `?` operator on `Result` desugars to these three methods: `into_result`
+// to inspect the value, `from_error` to build the early return, and `from_ok`
+// to wrap the success value.
+#[unstable(feature = "try_trait", issue = "42327")]
+impl<T, E> ops::Try for Result<T, E> {
+ type Ok = T;
+ type Error = E;
+
+ #[inline]
+ fn into_result(self) -> Self {
+ self
+ }
+
+ #[inline]
+ fn from_ok(v: T) -> Self {
+ Ok(v)
+ }
+
+ #[inline]
+ fn from_error(v: E) -> Self {
+ Err(v)
+ }
+}
--- /dev/null
+//! Operations on ASCII `[u8]`.
+
+use crate::mem;
+
+#[lang = "slice_u8"]
+#[cfg(not(test))]
+impl [u8] {
+ /// Checks if all bytes in this slice are within the ASCII range.
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+ // Forwards to the word-at-a-time free function defined below.
+ is_ascii(self)
+ }
+
+ /// Checks that two slices are an ASCII case-insensitive match.
+ ///
+ /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporaries.
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
+ self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a.eq_ignore_ascii_case(b))
+ }
+
+ /// Converts this slice to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+ for byte in self {
+ byte.make_ascii_uppercase();
+ }
+ }
+
+ /// Converts this slice to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+ for byte in self {
+ byte.make_ascii_lowercase();
+ }
+ }
+}
+
+/// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
+/// from `../str/mod.rs`, which does something similar for utf8 validation.
+#[inline]
+fn contains_nonascii(v: usize) -> bool {
+ // The `u64 -> usize` cast intentionally truncates to `0x80808080` on
+ // 32-bit targets; the mask is the high bit of every byte either way.
+ const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;
+ (NONASCII_MASK & v) != 0
+}
+
+/// Optimized ASCII test that will use usize-at-a-time operations instead of
+/// byte-at-a-time operations (when possible).
+///
+/// The algorithm we use here is pretty simple. If `s` is too short, we just
+/// check each byte and be done with it. Otherwise:
+///
+/// - Read the first word with an unaligned load.
+/// - Align the pointer, read subsequent words until end with aligned loads.
+/// - Read the last `usize` from `s` with an unaligned load.
+///
+/// If any of these loads produces something for which `contains_nonascii`
+/// (above) returns true, then we know the answer is false.
+#[inline]
+fn is_ascii(s: &[u8]) -> bool {
+    const USIZE_SIZE: usize = mem::size_of::<usize>();
+
+    let len = s.len();
+    let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
+
+    // If we wouldn't gain anything from the word-at-a-time implementation, fall
+    // back to a scalar loop.
+    //
+    // We also do this for architectures where `size_of::<usize>()` isn't
+    // sufficient alignment for `usize`, because it's a weird edge case.
+    if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < mem::align_of::<usize>() {
+        return s.iter().all(|b| b.is_ascii());
+    }
+
+    // We always read the first word unaligned, so if `align_offset` is 0 we
+    // would read the same value again for the aligned read; skip a full word
+    // ahead in that case.
+    let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
+
+    let start = s.as_ptr();
+    // SAFETY: We verified `len >= USIZE_SIZE` above (via the early return), so
+    // reading one `usize` from the start of the slice is in bounds.
+    let first_word = unsafe { (start as *const usize).read_unaligned() };
+
+    if contains_nonascii(first_word) {
+        return false;
+    }
+    // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
+    // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
+    // checked above.
+    debug_assert!(offset_to_aligned <= len);
+
+    // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
+    // middle chunk of the slice.
+    let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
+
+    // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
+    let mut byte_pos = offset_to_aligned;
+
+    // Paranoia check about alignment, since we're about to do a bunch of
+    // aligned loads (which would be UB on a misaligned pointer). In practice
+    // this should be impossible barring a bug in `align_offset` though.
+    debug_assert_eq!((word_ptr as usize) % mem::align_of::<usize>(), 0);
+
+    // Read subsequent words until the last aligned word, excluding the last
+    // aligned word itself (it is covered by the unaligned tail read below).
+    // This guarantees the tail is always at most one `usize`, avoiding an
+    // extra `byte_pos == len` branch in the loop.
+    while byte_pos < len - USIZE_SIZE {
+        debug_assert!(
+            // Sanity check that the read is in bounds
+            (word_ptr as usize + USIZE_SIZE) <= (start.wrapping_add(len) as usize) &&
+            // And that our assumptions about `byte_pos` hold.
+            (word_ptr as usize) - (start as usize) == byte_pos
+        );
+
+        // SAFETY: We know `word_ptr` is properly aligned (because of
+        // `align_offset`), and the loop condition guarantees at least one full
+        // `usize` remains between `word_ptr` and the end of the slice.
+        let word = unsafe { word_ptr.read() };
+        if contains_nonascii(word) {
+            return false;
+        }
+
+        byte_pos += USIZE_SIZE;
+        // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
+        // after this `add`, `word_ptr` will be at most one-past-the-end.
+        word_ptr = unsafe { word_ptr.add(1) };
+    }
+
+    // Sanity check to ensure there really is only one `usize` left. This should
+    // be guaranteed by our loop condition.
+    debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);
+
+    // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
+    let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
+
+    !contains_nonascii(last_word)
+}
--- /dev/null
+//! Comparison traits for `[T]`.
+
+use crate::cmp;
+use crate::cmp::Ordering::{self, Greater, Less};
+use crate::mem;
+
+use super::from_raw_parts;
+use super::memchr;
+
+extern "C" {
+    /// Calls implementation provided memcmp.
+    ///
+    /// Interprets the data as u8.
+    ///
+    /// Returns 0 for equal, < 0 for less than and > 0 for greater
+    /// than.
+    ///
+    /// Callers must guarantee both pointers are valid for reads of `n` bytes.
+    // FIXME(#32610): Return type should be c_int
+    fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<A, B> PartialEq<[B]> for [A]
+where
+    A: PartialEq<B>,
+{
+    fn eq(&self, other: &[B]) -> bool {
+        // Dispatches through `SlicePartialEq` so the specialized impls below
+        // (equal-pointer shortcut, memcmp) can be selected per element type.
+        SlicePartialEq::equal(self, other)
+    }
+
+    fn ne(&self, other: &[B]) -> bool {
+        SlicePartialEq::not_equal(self, other)
+    }
+}
+
+// A slice is a total equivalence relation whenever its elements are.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for [T] {}
+
+/// Implements comparison of vectors [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for [T] {
+    fn cmp(&self, other: &[T]) -> Ordering {
+        // Dispatches through `SliceOrd` so `[u8]` can take the memcmp fast path.
+        SliceOrd::compare(self, other)
+    }
+}
+
+/// Implements comparison of vectors [lexicographically](Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for [T] {
+    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
+        // Dispatches through `SlicePartialOrd` so totally-ordered element types
+        // can reuse the `Ord`-based (possibly memcmp-accelerated) comparison.
+        SlicePartialOrd::partial_compare(self, other)
+    }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialEq
+trait SlicePartialEq<B> {
+    fn equal(&self, other: &[B]) -> bool;
+
+    // Provided in terms of `equal` so specialized impls only override one method.
+    fn not_equal(&self, other: &[B]) -> bool {
+        !self.equal(other)
+    }
+}
+
+// Generic slice equality
+impl<A, B> SlicePartialEq<B> for [A]
+where
+    A: PartialEq<B>,
+{
+    default fn equal(&self, other: &[B]) -> bool {
+        // Differing lengths can never be equal; checking up front also means
+        // the zip below visits exactly the common length.
+        if self.len() != other.len() {
+            return false;
+        }
+
+        self.iter().zip(other.iter()).all(|(x, y)| x == y)
+    }
+}
+
+// Use an equal-pointer optimization when types are `Eq`
+// We can't make `A` and `B` the same type because `min_specialization` won't
+// allow it.
+impl<A, B> SlicePartialEq<B> for [A]
+where
+    A: MarkerEq<B>,
+{
+    default fn equal(&self, other: &[B]) -> bool {
+        if self.len() != other.len() {
+            return false;
+        }
+
+        // Equal data pointer plus equal length means both slices view the same
+        // memory, and `Eq` (via `MarkerEq`) guarantees `x == x` holds.
+        // While performance would suffer if `guaranteed_eq` just returned `false`
+        // for all arguments, correctness and return value of this function are not affected.
+        if self.as_ptr().guaranteed_eq(other.as_ptr() as *const A) {
+            return true;
+        }
+
+        self.iter().zip(other.iter()).all(|(x, y)| x == y)
+    }
+}
+
+// Use memcmp for bytewise equality when the types allow
+impl<A, B> SlicePartialEq<B> for [A]
+where
+    A: BytewiseEquality<B>,
+{
+    fn equal(&self, other: &[B]) -> bool {
+        if self.len() != other.len() {
+            return false;
+        }
+
+        // While performance would suffer if `guaranteed_eq` just returned `false`
+        // for all arguments, correctness and return value of this function are not affected.
+        if self.as_ptr().guaranteed_eq(other.as_ptr() as *const A) {
+            return true;
+        }
+        // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
+        // The two slices have been checked to have the same size above.
+        unsafe {
+            // `size_of_val` on a slice is the total byte length (len * size_of::<A>()),
+            // not the element count.
+            let size = mem::size_of_val(self);
+            memcmp(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
+        }
+    }
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's PartialOrd
+// Note: implemented on the *element* type; `left`/`right` are whole slices.
+trait SlicePartialOrd: Sized {
+    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
+}
+
+impl<A: PartialOrd> SlicePartialOrd for A {
+    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+        let l = cmp::min(left.len(), right.len());
+
+        // Slice to the loop iteration range to enable bound check
+        // elimination in the compiler
+        let lhs = &left[..l];
+        let rhs = &right[..l];
+
+        for i in 0..l {
+            match lhs[i].partial_cmp(&rhs[i]) {
+                Some(Ordering::Equal) => (),
+                // First non-equal (or incomparable, i.e. `None`) pair decides.
+                non_eq => return non_eq,
+            }
+        }
+
+        // Common prefix compared equal: the shorter slice orders first,
+        // per lexicographic ordering.
+        left.len().partial_cmp(&right.len())
+    }
+}
+
+// This is the impl that we would like to have. Unfortunately it's not sound.
+// See `partial_ord_slice.rs`.
+/*
+impl<A> SlicePartialOrd for A
+where
+ A: Ord,
+{
+ default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+ Some(SliceOrd::compare(left, right))
+ }
+}
+*/
+
+// For element types whose `PartialOrd` is known to agree with their total
+// `Ord`, forward to the (potentially memcmp-accelerated) `SliceOrd` path.
+impl<A: AlwaysApplicableOrd> SlicePartialOrd for A {
+    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
+        Some(SliceOrd::compare(left, right))
+    }
+}
+
+// Marker for types where specializing `SlicePartialOrd` on the total order is
+// sound (see the commented-out generic impl above for why it can't be blanket).
+#[rustc_specialization_trait]
+trait AlwaysApplicableOrd: SliceOrd + Ord {}
+
+// Expands to one `AlwaysApplicableOrd` impl per entry; each `[$($p)*]` is the
+// generic parameter list for the type `$t` that follows it.
+macro_rules! always_applicable_ord {
+    ($([$($p:tt)*] $t:ty,)*) => {
+        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
+    }
+}
+
+// Totally-ordered primitives, plus references/`Option`s of such types.
+always_applicable_ord! {
+    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
+    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
+    [] bool, [] char,
+    [T: ?Sized] *const T, [T: ?Sized] *mut T,
+    [T: AlwaysApplicableOrd] &T,
+    [T: AlwaysApplicableOrd] &mut T,
+    [T: AlwaysApplicableOrd] Option<T>,
+}
+
+#[doc(hidden)]
+// intermediate trait for specialization of slice's Ord
+trait SliceOrd: Sized {
+    // Compares two slices of `Self` lexicographically.
+    fn compare(left: &[Self], right: &[Self]) -> Ordering;
+}
+
+impl<A: Ord> SliceOrd for A {
+    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
+        let l = cmp::min(left.len(), right.len());
+
+        // Slice to the loop iteration range to enable bound check
+        // elimination in the compiler
+        let lhs = &left[..l];
+        let rhs = &right[..l];
+
+        for i in 0..l {
+            match lhs[i].cmp(&rhs[i]) {
+                Ordering::Equal => (),
+                // First differing pair decides the ordering.
+                non_eq => return non_eq,
+            }
+        }
+
+        // Common prefix is equal: the shorter slice orders first.
+        left.len().cmp(&right.len())
+    }
+}
+
+// memcmp compares a sequence of unsigned bytes lexicographically.
+// this matches the order we want for [u8], but no others (not even [i8]).
+impl SliceOrd for u8 {
+    #[inline]
+    fn compare(left: &[Self], right: &[Self]) -> Ordering {
+        let order =
+            // SAFETY: `left` and `right` are references and are thus guaranteed to be valid.
+            // We use the minimum of both lengths which guarantees that both regions are
+            // valid for reads in that interval.
+            unsafe { memcmp(left.as_ptr(), right.as_ptr(), cmp::min(left.len(), right.len())) };
+        if order == 0 {
+            // Common prefix is equal: fall back to comparing the lengths.
+            left.len().cmp(&right.len())
+        } else if order < 0 {
+            Less
+        } else {
+            Greater
+        }
+    }
+}
+
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+// (Specialization markers must be method-less traits; the blanket impl below
+// makes this equivalent to bounding on `Eq` itself.)
+#[rustc_unsafe_specialization_marker]
+trait MarkerEq<T>: PartialEq<T> {}
+
+impl<T: Eq> MarkerEq<T> for T {}
+
+#[doc(hidden)]
+/// Trait implemented for types that can be compared for equality using
+/// their bytewise representation
+// Implemented below (via `impl_marker_for!`) only for primitive types with no
+// padding and no two representations of an equal value.
+#[rustc_specialization_trait]
+trait BytewiseEquality<T>: MarkerEq<T> + Copy {}
+
+// Implements the given marker trait for each listed type (T-for-T).
+macro_rules! impl_marker_for {
+    ($traitname:ident, $($ty:ty)*) => {
+        $(
+            impl $traitname<$ty> for $ty { }
+        )*
+    }
+}
+
+impl_marker_for!(BytewiseEquality,
+    u8 i8 u16 i16 u32 i32 u64 i64 u128 i128 usize isize char bool);
+
+// Element-type-dispatched backend for `<[T]>::contains`; lets byte types use
+// an optimized search.
+pub(super) trait SliceContains: Sized {
+    fn slice_contains(&self, x: &[Self]) -> bool;
+}
+
+impl<T> SliceContains for T
+where
+    T: PartialEq,
+{
+    default fn slice_contains(&self, x: &[Self]) -> bool {
+        // Plain linear scan; the impls below specialize byte types to memchr.
+        x.iter().any(|y| *y == *self)
+    }
+}
+
+impl SliceContains for u8 {
+    #[inline]
+    fn slice_contains(&self, x: &[Self]) -> bool {
+        // Delegate to the optimized byte search.
+        memchr::memchr(*self, x).is_some()
+    }
+}
+
+impl SliceContains for i8 {
+    #[inline]
+    fn slice_contains(&self, x: &[Self]) -> bool {
+        // Reinterpret the slice as bytes so memchr applies; an `i8` and its
+        // `as u8` cast have identical bit patterns, so equality is preserved.
+        let byte = *self as u8;
+        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
+        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
+        // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
+        // than `isize::MAX`. The returned slice is never mutated.
+        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
+        memchr::memchr(byte, bytes).is_some()
+    }
+}
--- /dev/null
+//! Indexing implementations for `[T]`.
+
+use crate::ops;
+use crate::ptr;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, I> ops::Index<I> for [T]
+where
+    I: SliceIndex<[T]>,
+{
+    type Output = I::Output;
+
+    #[inline]
+    fn index(&self, index: I) -> &I::Output {
+        // All index-type-specific behavior (element vs. subslice, bounds
+        // checks, panics) lives in the `SliceIndex` impls below.
+        index.index(self)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, I> ops::IndexMut<I> for [T]
+where
+    I: SliceIndex<[T]>,
+{
+    #[inline]
+    fn index_mut(&mut self, index: I) -> &mut I::Output {
+        // Mirrors `Index::index`; dispatches to the `SliceIndex` impl.
+        index.index_mut(self)
+    }
+}
+
+// Out-of-line panic helper: `#[cold]`/`#[inline(never)]` keep the panic
+// machinery out of the caller's hot path; `#[track_caller]` reports the
+// user's indexing site.
+#[inline(never)]
+#[cold]
+#[track_caller]
+fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
+    panic!("range start index {} out of range for slice of length {}", index, len);
+}
+
+// Out-of-line panic helper for a range end past the slice length.
+#[inline(never)]
+#[cold]
+#[track_caller]
+pub(crate) fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
+    panic!("range end index {} out of range for slice of length {}", index, len);
+}
+
+// Out-of-line panic helper for a range whose start is greater than its end.
+#[inline(never)]
+#[cold]
+#[track_caller]
+pub(crate) fn slice_index_order_fail(index: usize, end: usize) -> ! {
+    panic!("slice index starts at {} but ends at {}", index, end);
+}
+
+// Out-of-line panic helper for a range start that would overflow `usize`.
+#[inline(never)]
+#[cold]
+#[track_caller]
+pub(crate) fn slice_start_index_overflow_fail() -> ! {
+    panic!("attempted to index slice from after maximum usize");
+}
+
+// Out-of-line panic helper for a range end that would overflow `usize`
+// (e.g. `..=usize::MAX`, whose exclusive end is `usize::MAX + 1`).
+#[inline(never)]
+#[cold]
+#[track_caller]
+pub(crate) fn slice_end_index_overflow_fail() -> ! {
+    panic!("attempted to index slice up to maximum usize");
+}
+
+// Seals `SliceIndex`: the trait requires `Sealed`, and only the types listed
+// here can implement it, so downstream crates cannot add their own impls.
+mod private_slice_index {
+    use super::ops;
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    pub trait Sealed {}
+
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for usize {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::Range<usize> {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::RangeTo<usize> {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::RangeFrom<usize> {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::RangeFull {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::RangeInclusive<usize> {}
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    impl Sealed for ops::RangeToInclusive<usize> {}
+}
+
+/// A helper trait used for indexing operations.
+///
+/// Implementations of this trait have to promise that if the argument
+/// to `get_(mut_)unchecked` is a safe reference, then so is the result.
+// Sealed (see `private_slice_index`): only the index types declared in this
+// module may implement it, which is what makes the promise above enforceable.
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+#[rustc_on_unimplemented(
+    on(T = "str", label = "string indices are ranges of `usize`",),
+    on(
+        all(any(T = "str", T = "&str", T = "std::string::String"), _Self = "{integer}"),
+        note = "you can use `.chars().nth()` or `.bytes().nth()`\n\
+                for more information, see chapter 8 in The Book: \
+                <https://doc.rust-lang.org/book/ch08-02-strings.html#indexing-into-strings>"
+    ),
+    message = "the type `{T}` cannot be indexed by `{Self}`",
+    label = "slice indices are of type `usize` or ranges of `usize`"
+)]
+pub unsafe trait SliceIndex<T: ?Sized>: private_slice_index::Sealed {
+    /// The output type returned by methods.
+    // A single element for `usize`, a subslice for the range types.
+    #[stable(feature = "slice_get_slice", since = "1.28.0")]
+    type Output: ?Sized;
+
+    /// Returns a shared reference to the output at this location, if in
+    /// bounds.
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    fn get(self, slice: &T) -> Option<&Self::Output>;
+
+    /// Returns a mutable reference to the output at this location, if in
+    /// bounds.
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>;
+
+    /// Returns a shared reference to the output at this location, without
+    /// performing any bounds checking.
+    /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
+    /// is *[undefined behavior]* even if the resulting reference is not used.
+    ///
+    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    unsafe fn get_unchecked(self, slice: *const T) -> *const Self::Output;
+
+    /// Returns a mutable reference to the output at this location, without
+    /// performing any bounds checking.
+    /// Calling this method with an out-of-bounds index or a dangling `slice` pointer
+    /// is *[undefined behavior]* even if the resulting reference is not used.
+    ///
+    /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    unsafe fn get_unchecked_mut(self, slice: *mut T) -> *mut Self::Output;
+
+    /// Returns a shared reference to the output at this location, panicking
+    /// if out of bounds.
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    #[track_caller]
+    fn index(self, slice: &T) -> &Self::Output;
+
+    /// Returns a mutable reference to the output at this location, panicking
+    /// if out of bounds.
+    #[unstable(feature = "slice_index_methods", issue = "none")]
+    #[track_caller]
+    fn index_mut(self, slice: &mut T) -> &mut Self::Output;
+}
+
+// Indexing with a bare `usize` yields a single element.
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+unsafe impl<T> SliceIndex<[T]> for usize {
+    type Output = T;
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&T> {
+        // SAFETY: `self` is checked to be in bounds.
+        if self < slice.len() { unsafe { Some(&*self.get_unchecked(slice)) } } else { None }
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
+        // SAFETY: `self` is checked to be in bounds.
+        if self < slice.len() { unsafe { Some(&mut *self.get_unchecked_mut(slice)) } } else { None }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
+        // SAFETY: the caller guarantees that `slice` is not dangling, so it
+        // cannot be longer than `isize::MAX`. They also guarantee that
+        // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
+        // so the call to `add` is safe.
+        unsafe { slice.as_ptr().add(self) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe { slice.as_mut_ptr().add(self) }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &T {
+        // N.B., use intrinsic indexing
+        &(*slice)[self]
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut T {
+        // N.B., use intrinsic indexing
+        &mut (*slice)[self]
+    }
+}
+
+// `start..end` yields the subslice `[start, end)`. This impl is the workhorse:
+// the `RangeTo`/`RangeFrom`/inclusive impls below all delegate to it.
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::Range<usize> {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        // An inverted range (start > end) is invalid, not merely empty.
+        if self.start > self.end || self.end > slice.len() {
+            None
+        } else {
+            // SAFETY: `self` is checked to be valid and in bounds above.
+            unsafe { Some(&*self.get_unchecked(slice)) }
+        }
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        if self.start > self.end || self.end > slice.len() {
+            None
+        } else {
+            // SAFETY: `self` is checked to be valid and in bounds above.
+            unsafe { Some(&mut *self.get_unchecked_mut(slice)) }
+        }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        // SAFETY: the caller guarantees that `slice` is not dangling, so it
+        // cannot be longer than `isize::MAX`. They also guarantee that
+        // `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
+        // so the call to `add` is safe.
+        unsafe { ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe {
+            ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
+        }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        // Distinct panic messages for the two failure modes.
+        if self.start > self.end {
+            slice_index_order_fail(self.start, self.end);
+        } else if self.end > slice.len() {
+            slice_end_index_len_fail(self.end, slice.len());
+        }
+        // SAFETY: `self` is checked to be valid and in bounds above.
+        unsafe { &*self.get_unchecked(slice) }
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        if self.start > self.end {
+            slice_index_order_fail(self.start, self.end);
+        } else if self.end > slice.len() {
+            slice_end_index_len_fail(self.end, slice.len());
+        }
+        // SAFETY: `self` is checked to be valid and in bounds above.
+        unsafe { &mut *self.get_unchecked_mut(slice) }
+    }
+}
+
+// `..end` behaves exactly like `0..end`; every method delegates to the
+// `Range<usize>` impl above.
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        (0..self.end).get(slice)
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        (0..self.end).get_mut(slice)
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..self.end).get_unchecked(slice) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..self.end).get_unchecked_mut(slice) }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        (0..self.end).index(slice)
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        (0..self.end).index_mut(slice)
+    }
+}
+
+// `start..` behaves like `start..slice.len()`, delegating to the
+// `Range<usize>` impl; only `index`/`index_mut` check `start` themselves so
+// they can report the start-specific panic message.
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        (self.start..slice.len()).get(slice)
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        (self.start..slice.len()).get_mut(slice)
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (self.start..slice.len()).get_unchecked(slice) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        if self.start > slice.len() {
+            slice_start_index_len_fail(self.start, slice.len());
+        }
+        // SAFETY: `self` is checked to be valid and in bounds above.
+        unsafe { &*self.get_unchecked(slice) }
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        if self.start > slice.len() {
+            slice_start_index_len_fail(self.start, slice.len());
+        }
+        // SAFETY: `self` is checked to be valid and in bounds above.
+        unsafe { &mut *self.get_unchecked_mut(slice) }
+    }
+}
+
+// `..` is the identity index: it returns the whole slice, so no bounds
+// checks are needed and none of these methods can fail.
+#[stable(feature = "slice_get_slice_impls", since = "1.15.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::RangeFull {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        Some(slice)
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        Some(slice)
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        slice
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        slice
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        slice
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        slice
+    }
+}
+
+// `start..=end` delegates to the half-open `Range` impl via
+// `into_slice_range`. `end == usize::MAX` is rejected first, because the
+// equivalent exclusive end (`end + 1`) would overflow.
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { self.into_slice_range().get_unchecked(slice) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { self.into_slice_range().get_unchecked_mut(slice) }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        if *self.end() == usize::MAX {
+            slice_end_index_overflow_fail();
+        }
+        self.into_slice_range().index(slice)
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        if *self.end() == usize::MAX {
+            slice_end_index_overflow_fail();
+        }
+        self.into_slice_range().index_mut(slice)
+    }
+}
+
+// `..=end` behaves exactly like `0..=end`; every method delegates to the
+// `RangeInclusive<usize>` impl above (which handles the `usize::MAX` case).
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+unsafe impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
+    type Output = [T];
+
+    #[inline]
+    fn get(self, slice: &[T]) -> Option<&[T]> {
+        (0..=self.end).get(slice)
+    }
+
+    #[inline]
+    fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
+        (0..=self.end).get_mut(slice)
+    }
+
+    #[inline]
+    unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..=self.end).get_unchecked(slice) }
+    }
+
+    #[inline]
+    unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..=self.end).get_unchecked_mut(slice) }
+    }
+
+    #[inline]
+    fn index(self, slice: &[T]) -> &[T] {
+        (0..=self.end).index(slice)
+    }
+
+    #[inline]
+    fn index_mut(self, slice: &mut [T]) -> &mut [T] {
+        (0..=self.end).index_mut(slice)
+    }
+}
--- /dev/null
+//! Definitions of a bunch of iterators for `[T]`.
+
+#[macro_use] // import iterator! and forward_iterator!
+mod macros;
+
+use crate::cmp;
+use crate::cmp::Ordering;
+use crate::fmt;
+use crate::intrinsics::{assume, exact_div, unchecked_sub};
+use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess};
+use crate::marker::{PhantomData, Send, Sized, Sync};
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr::NonNull;
+
+use super::{from_raw_parts, from_raw_parts_mut};
+
+// `for x in &slice` iterates by shared reference.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a [T] {
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+// `for x in &mut slice` iterates by mutable reference.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a mut [T] {
+    type Item = &'a mut T;
+    type IntoIter = IterMut<'a, T>;
+
+    fn into_iter(self) -> IterMut<'a, T> {
+        self.iter_mut()
+    }
+}
+
+// Macro helper functions
+// Returns `size_of::<T>()`, with `T` inferred from the (unused) pointer
+// argument — lets the iterator macros get the element size without naming `T`.
+#[inline(always)]
+fn size_from_ptr<T>(_: *const T) -> usize {
+    mem::size_of::<T>()
+}
+
+/// Immutable slice iterator
+///
+/// This struct is created by the [`iter`] method on [slices].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // First, we declare a type which has `iter` method to get the `Iter` struct (&[usize here]):
+/// let slice = &[1, 2, 3];
+///
+/// // Then, we iterate over it:
+/// for element in slice.iter() {
+///     println!("{}", element);
+/// }
+/// ```
+///
+/// [`iter`]: ../../std/primitive.slice.html#method.iter
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+    ptr: NonNull<T>, // Current position; `NonNull` holds the invariant set in `Iter::new`.
+    end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+    // ptr == end is a quick test for the Iterator being empty, that works
+    // for both ZST and non-ZST.
+    _marker: PhantomData<&'a T>, // Ties the iterator to the borrow of the source slice.
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Renders as `Iter([remaining, elements])`.
+        f.debug_tuple("Iter").field(&self.as_slice()).finish()
+    }
+}
+
+// An `Iter` only ever hands out `&T`, so sending or sharing it across threads
+// amounts to sharing `&T` — both therefore require `T: Sync`.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for Iter<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Send for Iter<'_, T> {}
+
+impl<'a, T> Iter<'a, T> {
+    #[inline]
+    pub(super) fn new(slice: &'a [T]) -> Self {
+        let ptr = slice.as_ptr();
+        // SAFETY: Similar to `IterMut::new`.
+        unsafe {
+            assume(!ptr.is_null());
+
+            // For ZSTs, `end` encodes `ptr + len` in the address itself so
+            // that `ptr == end` still detects exhaustion (see field comment).
+            let end = if mem::size_of::<T>() == 0 {
+                (ptr as *const u8).wrapping_add(slice.len()) as *const T
+            } else {
+                ptr.add(slice.len())
+            };
+
+            Self { ptr: NonNull::new_unchecked(ptr as *mut T), end, _marker: PhantomData }
+        }
+    }
+
+    /// Views the underlying data as a subslice of the original data.
+    ///
+    /// This has the same lifetime as the original slice, and so the
+    /// iterator can continue to be used while this exists.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // First, we declare a type which has the `iter` method to get the `Iter`
+    /// // struct (&[usize here]):
+    /// let slice = &[1, 2, 3];
+    ///
+    /// // Then, we get the iterator:
+    /// let mut iter = slice.iter();
+    /// // So if we print what `as_slice` method returns here, we have "[1, 2, 3]":
+    /// println!("{:?}", iter.as_slice());
+    ///
+    /// // Next, we move to the second element of the slice:
+    /// iter.next();
+    /// // Now `as_slice` returns "[2, 3]":
+    /// println!("{:?}", iter.as_slice());
+    /// ```
+    #[stable(feature = "iter_to_slice", since = "1.4.0")]
+    pub fn as_slice(&self) -> &'a [T] {
+        self.make_slice()
+    }
+}
+
+// Expands the shared `iterator!` macro (from the `macros` module above) for
+// `Iter`. The trailing block provides an `is_sorted_by` override that works
+// on the remaining slice via `windows(2)` instead of consuming the iterator
+// element by element; `None` from `compare` counts as unsorted.
+iterator! {struct Iter -> *const T, &'a T, const, {/* no mut */}, {
+    fn is_sorted_by<F>(self, mut compare: F) -> bool
+    where
+        Self: Sized,
+        F: FnMut(&Self::Item, &Self::Item) -> Option<Ordering>,
+    {
+        self.as_slice().windows(2).all(|w| {
+            compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false)
+        })
+    }
+}}
+
+// Manual impl: `derive(Clone)` would add a `T: Clone` bound, but the iterator
+// only holds borrows, so it is clonable for any `T`.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+    fn clone(&self) -> Self {
+        Iter { ptr: self.ptr, end: self.end, _marker: self._marker }
+    }
+}
+
+#[stable(feature = "slice_iter_as_ref", since = "1.13.0")]
+impl<T> AsRef<[T]> for Iter<'_, T> {
+    fn as_ref(&self) -> &[T] {
+        // The remaining (not yet yielded) portion of the slice.
+        self.as_slice()
+    }
+}
+
+/// Mutable slice iterator.
+///
+/// This struct is created by the [`iter_mut`] method on [slices].
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // First, we declare a type which has `iter_mut` method to get the `IterMut`
+/// // struct (&[usize here]):
+/// let mut slice = &mut [1, 2, 3];
+///
+/// // Then, we iterate over it and increment each element value:
+/// for element in slice.iter_mut() {
+///     *element += 1;
+/// }
+///
+/// // We now have "[2, 3, 4]":
+/// println!("{:?}", slice);
+/// ```
+///
+/// [`iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+    ptr: NonNull<T>, // Current position; `NonNull` holds the invariant set in `IterMut::new`.
+    end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+    // ptr == end is a quick test for the Iterator being empty, that works
+    // for both ZST and non-ZST.
+    _marker: PhantomData<&'a mut T>, // Ties the iterator to the unique borrow of the slice.
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Renders as `IterMut([remaining, elements])`.
+        f.debug_tuple("IterMut").field(&self.make_slice()).finish()
+    }
+}
+
+// An `IterMut` hands out `&mut T`: sharing it (`Sync`) shares `&mut T`, which
+// needs `T: Sync`; sending it (`Send`) moves `&mut T`, which needs `T: Send`.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+
+impl<'a, T> IterMut<'a, T> {
+    #[inline]
+    pub(super) fn new(slice: &'a mut [T]) -> Self {
+        let ptr = slice.as_mut_ptr();
+        // SAFETY: There are several things here:
+        //
+        // `ptr` has been obtained by `slice.as_mut_ptr()` where `slice` is a
+        // valid reference, thus it is non-null and safe to use and pass to
+        // `NonNull::new_unchecked`.
+        //
+        // Adding `slice.len()` to the starting pointer gives a pointer
+        // at the end of `slice`. `end` will never be dereferenced, only checked
+        // for direct pointer equality with `ptr` to check if the iterator is
+        // done.
+        //
+        // In the case of a ZST, the end pointer is just the start pointer plus
+        // the length, which also allows for the fast `ptr == end` check.
+        //
+        // See the `next_unchecked!` and `is_empty!` macros as well as the
+        // `post_inc_start` method for more information.
+        unsafe {
+            assume(!ptr.is_null());
+
+            let end = if mem::size_of::<T>() == 0 {
+                (ptr as *mut u8).wrapping_add(slice.len()) as *mut T
+            } else {
+                ptr.add(slice.len())
+            };
+
+            Self { ptr: NonNull::new_unchecked(ptr), end, _marker: PhantomData }
+        }
+    }
+
+    /// Views the underlying data as a subslice of the original data.
+    ///
+    /// To avoid creating `&mut` references that alias, this is forced
+    /// to consume the iterator.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// // First, we declare a type which has `iter_mut` method to get the `IterMut`
+    /// // struct (&[usize here]):
+    /// let mut slice = &mut [1, 2, 3];
+    ///
+    /// {
+    ///     // Then, we get the iterator:
+    ///     let mut iter = slice.iter_mut();
+    ///     // We move to next element:
+    ///     iter.next();
+    ///     // So if we print what `into_slice` method returns here, we have "[2, 3]":
+    ///     println!("{:?}", iter.into_slice());
+    /// }
+    ///
+    /// // Now let's modify a value of the slice:
+    /// {
+    ///     // First we get back the iterator:
+    ///     let mut iter = slice.iter_mut();
+    ///     // We change the value of the first element of the slice returned by the `next` method:
+    ///     *iter.next().unwrap() += 1;
+    /// }
+    /// // Now slice is "[2, 2, 3]":
+    /// println!("{:?}", slice);
+    /// ```
+    #[stable(feature = "iter_to_slice", since = "1.4.0")]
+    pub fn into_slice(self) -> &'a mut [T] {
+        // SAFETY: the iterator was created from a mutable slice with pointer
+        // `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
+        // for `from_raw_parts_mut` are fulfilled.
+        unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
+    }
+
+    /// Views the underlying data as a subslice of the original data.
+    ///
+    /// To avoid creating `&mut [T]` references that alias, the returned slice
+    /// borrows its lifetime from the iterator the method is applied on.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// # #![feature(slice_iter_mut_as_slice)]
+    /// let mut slice: &mut [usize] = &mut [1, 2, 3];
+    ///
+    /// // First, we get the iterator:
+    /// let mut iter = slice.iter_mut();
+    /// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]":
+    /// assert_eq!(iter.as_slice(), &[1, 2, 3]);
+    ///
+    /// // Next, we move to the second element of the slice:
+    /// iter.next();
+    /// // Now `as_slice` returns "[2, 3]":
+    /// assert_eq!(iter.as_slice(), &[2, 3]);
+    /// ```
+    #[unstable(feature = "slice_iter_mut_as_slice", reason = "recently added", issue = "58957")]
+    pub fn as_slice(&self) -> &[T] {
+        self.make_slice()
+    }
+}
+
+iterator! {struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}}
+
+/// An internal abstraction over the splitting iterators, so that
+/// splitn, splitn_mut etc can be implemented once.
+#[doc(hidden)]
+pub(super) trait SplitIter: DoubleEndedIterator {
+ /// Marks the underlying iterator as complete, extracting the remaining
+ /// portion of the slice.
+ fn finish(&mut self) -> Option<Self::Item>;
+}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function.
+///
+/// This struct is created by the [`split`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 33, 20];
+/// let mut iter = slice.split(|num| num % 3 == 0);
+/// ```
+///
+/// [`split`]: ../../std/primitive.slice.html#method.split
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Split<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> Split<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for Split<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Split").field("v", &self.v).field("finished", &self.finished).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, P> Clone for Split<'_, T, P>
+where
+ P: Clone + FnMut(&T) -> bool,
+{
+ fn clone(&self) -> Self {
+ Split { v: self.v, pred: self.pred.clone(), finished: self.finished }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> Iterator for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ match self.v.iter().position(|x| (self.pred)(x)) {
+ None => self.finish(),
+ Some(idx) => {
+ let ret = Some(&self.v[..idx]);
+ self.v = &self.v[idx + 1..];
+ ret
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> DoubleEndedIterator for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ match self.v.iter().rposition(|x| (self.pred)(x)) {
+ None => self.finish(),
+ Some(idx) => {
+ let ret = Some(&self.v[idx + 1..]);
+ self.v = &self.v[..idx];
+ ret
+ }
+ }
+ }
+}
+
+impl<'a, T, P> SplitIter for Split<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ None
+ } else {
+ self.finished = true;
+ Some(self.v)
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, P> FusedIterator for Split<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function. Unlike `Split`, it contains the matched part as a terminator
+/// of the subslice.
+///
+/// This struct is created by the [`split_inclusive`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// #![feature(split_inclusive)]
+///
+/// let slice = [10, 40, 33, 20];
+/// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+/// ```
+///
+/// [`split_inclusive`]: ../../std/primitive.slice.html#method.split_inclusive
+/// [slices]: ../../std/primitive.slice.html
+#[unstable(feature = "split_inclusive", issue = "72360")]
+pub struct SplitInclusive<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusive<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusive")
+ .field("v", &self.v)
+ .field("finished", &self.finished)
+ .finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<T, P> Clone for SplitInclusive<'_, T, P>
+where
+ P: Clone + FnMut(&T) -> bool,
+{
+ fn clone(&self) -> Self {
+ SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished }
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, T, P> Iterator for SplitInclusive<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx =
+ self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len());
+ if idx == self.v.len() {
+ self.finished = true;
+ }
+ let ret = Some(&self.v[..idx]);
+ self.v = &self.v[idx..];
+ ret
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) }
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.finished {
+ return None;
+ }
+
+ // The last index of self.v is already checked and found to match
+ // by the last iteration, so we start searching a new match
+ // one index to the left.
+ let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] };
+ let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0);
+ if idx == 0 {
+ self.finished = true;
+ }
+ let ret = Some(&self.v[idx..]);
+ self.v = &self.v[..idx];
+ ret
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the mutable subslices of the vector which are separated
+/// by elements that match `pred`.
+///
+/// This struct is created by the [`split_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut v = [10, 40, 30, 20, 60, 50];
+/// let iter = v.split_mut(|num| *num % 3 == 0);
+/// ```
+///
+/// [`split_mut`]: ../../std/primitive.slice.html#method.split_mut
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SplitMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a mut [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitMut").field("v", &self.v).field("finished", &self.finished).finish()
+ }
+}
+
+impl<'a, T, P> SplitIter for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ None
+ } else {
+ self.finished = true;
+ Some(mem::replace(&mut self.v, &mut []))
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> Iterator for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+ self.v.iter().position(|x| (*pred)(x))
+ };
+ match idx_opt {
+ None => self.finish(),
+ Some(idx) => {
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = &mut tail[1..];
+ Some(head)
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // if the predicate doesn't match anything, we yield one slice
+ // if it matches every element, we yield len+1 empty slices.
+ (1, Some(self.v.len() + 1))
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+ self.v.iter().rposition(|x| (*pred)(x))
+ };
+ match idx_opt {
+ None => self.finish(),
+ Some(idx) => {
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = head;
+ Some(&mut tail[1..])
+ }
+ }
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, P> FusedIterator for SplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the mutable subslices of the vector which are separated
+/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched
+/// parts in the ends of the subslices.
+///
+/// This struct is created by the [`split_inclusive_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// #![feature(split_inclusive)]
+///
+/// let mut v = [10, 40, 30, 20, 60, 50];
+/// let iter = v.split_inclusive_mut(|num| *num % 3 == 0);
+/// ```
+///
+/// [`split_inclusive_mut`]: ../../std/primitive.slice.html#method.split_inclusive_mut
+/// [slices]: ../../std/primitive.slice.html
+#[unstable(feature = "split_inclusive", issue = "72360")]
+pub struct SplitInclusiveMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ v: &'a mut [T],
+ pred: P,
+ finished: bool,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusiveMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ Self { v: slice, pred, finished: false }
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusiveMut")
+ .field("v", &self.v)
+ .field("finished", &self.finished)
+ .finish()
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+ self.v.iter().position(|x| (*pred)(x))
+ };
+ let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len());
+ if idx == self.v.len() {
+ self.finished = true;
+ }
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = tail;
+ Some(head)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.finished {
+ (0, Some(0))
+ } else {
+ // if the predicate doesn't match anything, we yield one slice
+ // if it matches every element, we yield len+1 empty slices.
+ (1, Some(self.v.len() + 1))
+ }
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ if self.finished {
+ return None;
+ }
+
+ let idx_opt = if self.v.is_empty() {
+ None
+ } else {
+ // work around borrowck limitations
+ let pred = &mut self.pred;
+
+ // The last index of self.v is already checked and found to match
+ // by the last iteration, so we start searching a new match
+ // one index to the left.
+ let remainder = &self.v[..(self.v.len() - 1)];
+ remainder.iter().rposition(|x| (*pred)(x))
+ };
+ let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0);
+ if idx == 0 {
+ self.finished = true;
+ }
+ let tmp = mem::replace(&mut self.v, &mut []);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = head;
+ Some(tail)
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, starting from the end of the slice.
+///
+/// This struct is created by the [`rsplit`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [11, 22, 33, 0, 44, 55];
+/// let iter = slice.rsplit(|num| *num == 0);
+/// ```
+///
+/// [`rsplit`]: ../../std/primitive.slice.html#method.rsplit
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+#[derive(Clone)] // Is this correct, or does it incorrectly require `T: Clone`?
+pub struct RSplit<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: Split<'a, T, P>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplit<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], pred: P) -> Self {
+ Self { inner: Split::new(slice, pred) }
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplit<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplit")
+ .field("v", &self.inner.v)
+ .field("finished", &self.inner.finished)
+ .finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> Iterator for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ self.inner.next()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> SplitIter for RSplit<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a [T]> {
+ self.inner.finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T, P> FusedIterator for RSplit<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// An iterator over the subslices of the vector which are separated
+/// by elements that match `pred`, starting from the end of the slice.
+///
+/// This struct is created by the [`rsplit_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [11, 22, 33, 0, 44, 55];
+/// let iter = slice.rsplit_mut(|num| *num == 0);
+/// ```
+///
+/// [`rsplit_mut`]: ../../std/primitive.slice.html#method.rsplit_mut
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub struct RSplitMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: SplitMut<'a, T, P>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], pred: P) -> Self {
+ Self { inner: SplitMut::new(slice, pred) }
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitMut")
+ .field("v", &self.inner.v)
+ .field("finished", &self.inner.finished)
+ .finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> SplitIter for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn finish(&mut self) -> Option<&'a mut [T]> {
+ self.inner.finish()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> Iterator for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ type Item = &'a mut [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut [T]> {
+ self.inner.next_back()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut [T]> {
+ self.inner.next()
+ }
+}
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+impl<T, P> FusedIterator for RSplitMut<'_, T, P> where P: FnMut(&T) -> bool {}
+
+/// A private iterator over subslices separated by elements that
+/// match a predicate function, splitting at most a fixed number of
+/// times.
+#[derive(Debug)]
+struct GenericSplitN<I> {
+ iter: I,
+ count: usize,
+}
+
+impl<T, I: SplitIter<Item = T>> Iterator for GenericSplitN<I> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ match self.count {
+ 0 => None,
+ 1 => {
+ self.count -= 1;
+ self.iter.finish()
+ }
+ _ => {
+ self.count -= 1;
+ self.iter.next()
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (lower, upper_opt) = self.iter.size_hint();
+ (lower, upper_opt.map(|upper| cmp::min(self.count, upper)))
+ }
+}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, limited to a given number of splits.
+///
+/// This struct is created by the [`splitn`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.splitn(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`splitn`]: ../../std/primitive.slice.html#method.splitn
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SplitN<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<Split<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitN<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: Split<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitN<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitN").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a
+/// predicate function, limited to a given number of splits, starting
+/// from the end of the slice.
+///
+/// This struct is created by the [`rsplitn`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.rsplitn(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`rsplitn`]: ../../std/primitive.slice.html#method.rsplitn
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RSplitN<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<RSplit<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitN<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: RSplit<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitN<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitN").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a predicate
+/// function, limited to a given number of splits.
+///
+/// This struct is created by the [`splitn_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.splitn_mut(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`splitn_mut`]: ../../std/primitive.slice.html#method.splitn_mut
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SplitNMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<SplitMut<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitNMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: SplitMut<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for SplitNMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitNMut").field("inner", &self.inner).finish()
+ }
+}
+
+/// An iterator over subslices separated by elements that match a
+/// predicate function, limited to a given number of splits, starting
+/// from the end of the slice.
+///
+/// This struct is created by the [`rsplitn_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = [10, 40, 30, 20, 60, 50];
+/// let iter = slice.rsplitn_mut(2, |num| *num % 3 == 0);
+/// ```
+///
+/// [`rsplitn_mut`]: ../../std/primitive.slice.html#method.rsplitn_mut
+/// [slices]: ../../std/primitive.slice.html
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct RSplitNMut<'a, T: 'a, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ inner: GenericSplitN<RSplitMut<'a, T, P>>,
+}
+
+impl<'a, T: 'a, P: FnMut(&T) -> bool> RSplitNMut<'a, T, P> {
+ #[inline]
+ pub(super) fn new(s: RSplitMut<'a, T, P>, n: usize) -> Self {
+ Self { inner: GenericSplitN { iter: s, count: n } }
+ }
+}
+
+#[stable(feature = "core_impl_debug", since = "1.9.0")]
+impl<T: fmt::Debug, P> fmt::Debug for RSplitNMut<'_, T, P>
+where
+ P: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("RSplitNMut").field("inner", &self.inner).finish()
+ }
+}
+
+forward_iterator! { SplitN: T, &'a [T] }
+forward_iterator! { RSplitN: T, &'a [T] }
+forward_iterator! { SplitNMut: T, &'a mut [T] }
+forward_iterator! { RSplitNMut: T, &'a mut [T] }
+
+/// An iterator over overlapping subslices of length `size`.
+///
+/// This struct is created by the [`windows`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['r', 'u', 's', 't'];
+/// let iter = slice.windows(2);
+/// ```
+///
+/// [`windows`]: ../../std/primitive.slice.html#method.windows
+/// [slices]: ../../std/primitive.slice.html
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Windows<'a, T: 'a> {
+ v: &'a [T],
+ size: NonZeroUsize,
+}
+
+impl<'a, T: 'a> Windows<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: NonZeroUsize) -> Self {
+ Self { v: slice, size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Windows<'_, T> {
+ fn clone(&self) -> Self {
+ Windows { v: self.v, size: self.size }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Windows<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let ret = Some(&self.v[..self.size.get()]);
+ self.v = &self.v[1..];
+ ret
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.size.get() > self.v.len() {
+ (0, Some(0))
+ } else {
+ let size = self.v.len() - self.size.get() + 1;
+ (size, Some(size))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = self.size.get().overflowing_add(n);
+ if end > self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let nth = &self.v[n..end];
+ self.v = &self.v[n + 1..];
+ Some(nth)
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let start = self.v.len() - self.size.get();
+ Some(&self.v[start..])
+ }
+ }
+
+ #[doc(hidden)]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // SAFETY: since the caller guarantees that `i` is in bounds,
+ // which means that `i` cannot overflow an `isize`, and the
+ // slice created by `from_raw_parts` is a subslice of `self.v`
+ // thus is guaranteed to be valid for the lifetime `'a` of `self.v`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(idx), self.size.get()) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Windows<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.size.get() > self.v.len() {
+ None
+ } else {
+ let ret = Some(&self.v[self.v.len() - self.size.get()..]);
+ self.v = &self.v[..self.v.len() - 1];
+ ret
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let (end, overflow) = self.v.len().overflowing_sub(n);
+ if end < self.size.get() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let ret = &self.v[end - self.size.get()..end];
+ self.v = &self.v[..end - 1];
+ Some(ret)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Windows<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Windows<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Windows<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
+ fn may_have_side_effect() -> bool {
+ false
+ }
+}
+
+/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
+/// time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`chunks`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks(2);
+/// ```
+///
+/// [`chunks`]: ../../std/primitive.slice.html#method.chunks
+/// [slices]: ../../std/primitive.slice.html
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Chunks<'a, T: 'a> {
+ v: &'a [T],
+ chunk_size: usize,
+}
+
+impl<'a, T: 'a> Chunks<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Chunks<'_, T> {
+ fn clone(&self) -> Self {
+ Chunks { v: self.v, chunk_size: self.chunk_size }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Chunks<'a, T> {
+ type Item = &'a [T];
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let chunksz = cmp::min(self.v.len(), self.chunk_size);
+ let (fst, snd) = self.v.split_at(chunksz);
+ self.v = snd;
+ Some(fst)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.v.is_empty() {
+ (0, Some(0))
+ } else {
+ let n = self.v.len() / self.chunk_size;
+ let rem = self.v.len() % self.chunk_size;
+ let n = if rem > 0 { n + 1 } else { n };
+ (n, Some(n))
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ let (start, overflow) = n.overflowing_mul(self.chunk_size);
+ if start >= self.v.len() || overflow {
+ self.v = &[];
+ None
+ } else {
+ let end = match start.checked_add(self.chunk_size) {
+ Some(sum) => cmp::min(self.v.len(), sum),
+ None => self.v.len(),
+ };
+ let nth = &self.v[start..end];
+ self.v = &self.v[end..];
+ Some(nth)
+ }
+ }
+
+ #[inline]
+ fn last(self) -> Option<Self::Item> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
+ Some(&self.v[start..])
+ }
+ }
+
+ #[doc(hidden)]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ let start = idx * self.chunk_size;
+ let end = match start.checked_add(self.chunk_size) {
+ None => self.v.len(),
+ Some(end) => cmp::min(end, self.v.len()),
+ };
+ // SAFETY: the caller guarantees that `i` is in bounds,
+ // which means that `start` must be in bounds of the
+ // underlying `self.v` slice, and we made sure that `end`
+ // is also in bounds of `self.v`. Thus, `start` cannot overflow
+ // an `isize`, and the slice constructed by `from_raw_parts`
+ // is a subslice of `self.v` which is guaranteed to be valid
+ // for the lifetime `'a` of `self.v`.
+ unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Chunks<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a [T]> {
+ if self.v.is_empty() {
+ None
+ } else {
+ let remainder = self.v.len() % self.chunk_size;
+ let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
+ let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
+ self.v = fst;
+ Some(snd)
+ }
+ }
+
+ #[inline]
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ let len = self.len();
+ if n >= len {
+ self.v = &[];
+ None
+ } else {
+ let start = (len - 1 - n) * self.chunk_size;
+ let end = match start.checked_add(self.chunk_size) {
+ Some(res) => cmp::min(res, self.v.len()),
+ None => self.v.len(),
+ };
+ let nth_back = &self.v[start..end];
+ self.v = &self.v[..start];
+ Some(nth_back)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Chunks<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Chunks<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Chunks<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
+ fn may_have_side_effect() -> bool {
+ false
+ }
+}
+
+/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
+/// elements at a time), starting at the beginning of the slice.
+///
+/// When the slice len is not evenly divided by the chunk size, the last slice
+/// of the iteration will be the remainder.
+///
+/// This struct is created by the [`chunks_mut`] method on [slices].
+///
+/// # Example
+///
+/// ```
+/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
+/// let iter = slice.chunks_mut(2);
+/// ```
+///
+/// [`chunks_mut`]: ../../std/primitive.slice.html#method.chunks_mut
+/// [slices]: ../../std/primitive.slice.html
+#[derive(Debug)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct ChunksMut<'a, T: 'a> {
+ v: &'a mut [T],
+ chunk_size: usize,
+}
+
+impl<'a, T: 'a> ChunksMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for ChunksMut<'a, T> {
    type Item = &'a mut [T];

    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The final chunk may be shorter than `chunk_size`.
            let sz = cmp::min(self.v.len(), self.chunk_size);
            // Move the slice out of `self` so the split borrows for the full
            // lifetime `'a` rather than for the `&mut self` borrow.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(sz);
            self.v = tail;
            Some(head)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Number of whole chunks, plus one if a non-empty remainder exists.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
        // Element index of the n-th chunk; a multiplication overflow means
        // the index is certainly past the end.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &mut [];
            None
        } else {
            // The n-th chunk may be cut short by the end of the slice.
            let end = match start.checked_add(self.chunk_size) {
                Some(sum) => cmp::min(self.v.len(), sum),
                None => self.v.len(),
            };
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(end);
            let (_, nth) = head.split_at_mut(start);
            self.v = tail;
            Some(nth)
        }
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // Index of the first element of the final (possibly short) chunk.
            let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
            Some(&mut self.v[start..])
        }
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let start = idx * self.chunk_size;
        // Clamp the chunk end to the slice length (last chunk may be short).
        let end = match start.checked_add(self.chunk_size) {
            None => self.v.len(),
            Some(end) => cmp::min(end, self.v.len()),
        };
        // SAFETY: see comments for `Chunks::__iterator_get_unchecked`.
        //
        // Also note that the caller also guarantees that we're never called
        // with the same index again, and that no other methods that will
        // access this subslice are called, so it is valid for the returned
        // slice to be mutable.
        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The back chunk is the remainder, unless the length divides evenly.
            let remainder = self.v.len() % self.chunk_size;
            let sz = if remainder != 0 { remainder } else { self.chunk_size };
            // Move the slice out of `self` to split it with lifetime `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (head, tail) = tmp.split_at_mut(tmp_len - sz);
            self.v = head;
            Some(tail)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &mut [];
            None
        } else {
            // Chunk index counted from the front; can't underflow since `n < len`.
            let start = (len - 1 - n) * self.chunk_size;
            // The chunk may be cut short by the end of the slice.
            let end = match start.checked_add(self.chunk_size) {
                Some(res) => cmp::min(res, self.v.len()),
                None => self.v.len(),
            };
            let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
            let (head, nth_back) = temp.split_at_mut(start);
            self.v = head;
            Some(nth_back)
        }
    }
}
+
// Marker trait impls for `ChunksMut`, mirroring those of `Chunks`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for ChunksMut<'_, T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksMut<'_, T> {}

#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for ChunksMut<'_, T> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.chunks_exact(2);
/// ```
///
/// [`chunks_exact`]: ../../std/primitive.slice.html#method.chunks_exact
/// [`remainder`]: ChunksExact::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExact<'a, T: 'a> {
    /// The part of the slice that still yields whole chunks.
    v: &'a [T],
    /// The short tail (`len % chunk_size` elements), split off up front.
    rem: &'a [T],
    chunk_size: usize,
}
+
impl<'a, T> ChunksExact<'a, T> {
    #[inline]
    pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
        // Split off the tail that does not fill a whole chunk up front, so
        // every chunk yielded later is exactly `chunk_size` long.
        let rem = slice.len() % chunk_size;
        let fst_len = slice.len() - rem;
        // SAFETY: 0 <= fst_len <= slice.len() by construction above
        let (fst, snd) = unsafe { slice.split_at_unchecked(fst_len) };
        Self { v: fst, rem: snd, chunk_size }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    #[stable(feature = "chunks_exact", since = "1.31.0")]
    pub fn remainder(&self) -> &'a [T] {
        self.rem
    }
}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+impl<T> Clone for ChunksExact<'_, T> {
+ fn clone(&self) -> Self {
+ ChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
+ }
+}
+
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExact<'a, T> {
    type Item = &'a [T];

    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        // Fewer than `chunk_size` elements left means iteration is done
        // (the short tail was already split off into `rem`).
        if self.v.len() < self.chunk_size {
            None
        } else {
            let (fst, snd) = self.v.split_at(self.chunk_size);
            self.v = snd;
            Some(fst)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.v.len() / self.chunk_size;
        (n, Some(n))
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // Overflow in the multiplication means `n` is certainly out of range.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Skip the first `n` chunks, then yield the next one via `next`.
            let (_, snd) = self.v.split_at(start);
            self.v = snd;
            self.next()
        }
    }

    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let start = idx * self.chunk_size;
        // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
    }
}
+
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExact<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // The back chunk is always exactly `chunk_size` long because the
            // remainder was split off at construction.
            let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
            self.v = fst;
            Some(snd)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // Chunk index counted from the front; can't underflow since `n < len`.
            let start = (len - 1 - n) * self.chunk_size;
            let end = start + self.chunk_size;
            let nth_back = &self.v[start..end];
            self.v = &self.v[..start];
            Some(nth_back)
        }
    }
}
+
// Marker trait impls for `ChunksExact`.
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExact<'_, T> {
    // Cheaper than computing `len()`: no whole chunk fits once `v` is
    // shorter than `chunk_size`, and `v` only ever holds whole chunks.
    fn is_empty(&self) -> bool {
        self.v.is_empty()
    }
}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExact<'_, T> {}

#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExact<'_, T> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`chunks_exact_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.chunks_exact_mut(2);
/// ```
///
/// [`chunks_exact_mut`]: ../../std/primitive.slice.html#method.chunks_exact_mut
/// [`into_remainder`]: ChunksExactMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
pub struct ChunksExactMut<'a, T: 'a> {
    /// The part of the slice that still yields whole chunks.
    v: &'a mut [T],
    /// The short tail (`len % chunk_size` elements), split off up front.
    rem: &'a mut [T],
    chunk_size: usize,
}
+
impl<'a, T> ChunksExactMut<'a, T> {
    #[inline]
    pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
        // Split off the tail that does not fill a whole chunk up front, so
        // every chunk yielded later is exactly `chunk_size` long.
        let rem = slice.len() % chunk_size;
        let fst_len = slice.len() - rem;
        // SAFETY: 0 <= fst_len <= slice.len() by construction above
        let (fst, snd) = unsafe { slice.split_at_mut_unchecked(fst_len) };
        Self { v: fst, rem: snd, chunk_size }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    #[stable(feature = "chunks_exact", since = "1.31.0")]
    pub fn into_remainder(self) -> &'a mut [T] {
        self.rem
    }
}
+
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> Iterator for ChunksExactMut<'a, T> {
    type Item = &'a mut [T];

    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        // Fewer than `chunk_size` elements left means iteration is done.
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Move the slice out of `self` so the split borrows for `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(self.chunk_size);
            self.v = tail;
            Some(head)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.v.len() / self.chunk_size;
        (n, Some(n))
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
        // Overflow in the multiplication means `n` is certainly out of range.
        let (start, overflow) = n.overflowing_mul(self.chunk_size);
        if start >= self.v.len() || overflow {
            self.v = &mut [];
            None
        } else {
            // Skip the first `n` chunks, then yield the next one via `next`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (_, snd) = tmp.split_at_mut(start);
            self.v = snd;
            self.next()
        }
    }

    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let start = idx * self.chunk_size;
        // SAFETY: see comments for `ChunksMut::__iterator_get_unchecked`.
        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
    }
}
+
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for ChunksExactMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Move the slice out of `self` to split it with lifetime `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
            self.v = head;
            Some(tail)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &mut [];
            None
        } else {
            // Chunk index counted from the front; can't underflow since `n < len`.
            let start = (len - 1 - n) * self.chunk_size;
            let end = start + self.chunk_size;
            let (temp, _tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
            let (head, nth_back) = temp.split_at_mut(start);
            self.v = head;
            Some(nth_back)
        }
    }
}
+
// Marker trait impls for `ChunksExactMut`, mirroring those of `ChunksExact`.
#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> ExactSizeIterator for ChunksExactMut<'_, T> {
    // Cheaper than computing `len()`: `v` only ever holds whole chunks.
    fn is_empty(&self) -> bool {
        self.v.is_empty()
    }
}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for ChunksExactMut<'_, T> {}

#[stable(feature = "chunks_exact", since = "1.31.0")]
impl<T> FusedIterator for ChunksExactMut<'_, T> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// A windowed iterator over a slice in overlapping chunks (`N` elements at a
/// time), starting at the beginning of the slice
///
/// This struct is created by the [`array_windows`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_windows)]
///
/// let slice = [0, 1, 2, 3];
/// let iter = slice.array_windows::<2>();
/// ```
///
/// [`array_windows`]: ../../std/primitive.slice.html#method.array_windows
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug, Clone, Copy)]
#[unstable(feature = "array_windows", issue = "75027")]
pub struct ArrayWindows<'a, T: 'a, const N: usize> {
    /// Pointer to the first element of the next window to yield at the front.
    slice_head: *const T,
    /// Number of windows still to be yielded.
    num: usize,
    /// Ties the raw pointer to the borrowed slice's lifetime `'a`.
    marker: PhantomData<&'a [T; N]>,
}
+
impl<'a, T: 'a, const N: usize> ArrayWindows<'a, T, N> {
    #[inline]
    pub(super) fn new(slice: &'a [T]) -> Self {
        // A slice shorter than `N` has zero windows; `saturating_sub`
        // prevents underflow in that case.
        let num_windows = slice.len().saturating_sub(N - 1);
        Self { slice_head: slice.as_ptr(), num: num_windows, marker: PhantomData }
    }
}
+
#[unstable(feature = "array_windows", issue = "75027")]
impl<'a, T, const N: usize> Iterator for ArrayWindows<'a, T, N> {
    type Item = &'a [T; N];

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.num == 0 {
            return None;
        }
        // SAFETY:
        // This is safe because it's indexing into a slice guaranteed to be length > N.
        let ret = unsafe { &*self.slice_head.cast::<[T; N]>() };
        // SAFETY: Guaranteed that there are at least 1 item remaining otherwise
        // earlier branch would've been hit
        self.slice_head = unsafe { self.slice_head.add(1) };

        self.num -= 1;
        Some(ret)
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // `num` is maintained exactly, so the hint is exact.
        (self.num, Some(self.num))
    }

    #[inline]
    fn count(self) -> usize {
        self.num
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        if self.num <= n {
            // Skipping past the end exhausts the iterator.
            self.num = 0;
            return None;
        }
        // SAFETY:
        // This is safe because it's indexing into a slice guaranteed to be length > N.
        let ret = unsafe { &*self.slice_head.add(n).cast::<[T; N]>() };
        // SAFETY: Guaranteed that there are at least n items remaining
        self.slice_head = unsafe { self.slice_head.add(n + 1) };

        self.num -= n + 1;
        Some(ret)
    }

    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        // The last window is at offset `num - 1`; `checked_sub` handles the
        // already-exhausted case.
        self.nth(self.num.checked_sub(1)?)
    }
}
+
#[unstable(feature = "array_windows", issue = "75027")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayWindows<'a, T, N> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T; N]> {
        if self.num == 0 {
            return None;
        }
        // Iterating from the back only shrinks `num`; `slice_head` stays put.
        // SAFETY: Guaranteed that there are n items remaining, n-1 for 0-indexing.
        let ret = unsafe { &*self.slice_head.add(self.num - 1).cast::<[T; N]>() };
        self.num -= 1;
        Some(ret)
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<&'a [T; N]> {
        if self.num <= n {
            // Skipping past the front exhausts the iterator.
            self.num = 0;
            return None;
        }
        // SAFETY: Guaranteed that there are n items remaining, n-1 for 0-indexing.
        let ret = unsafe { &*self.slice_head.add(self.num - (n + 1)).cast::<[T; N]>() };
        self.num -= n + 1;
        Some(ret)
    }
}
+
#[unstable(feature = "array_windows", issue = "75027")]
impl<T, const N: usize> ExactSizeIterator for ArrayWindows<'_, T, N> {
    // `num` counts remaining windows directly, so this is a plain compare.
    fn is_empty(&self) -> bool {
        self.num == 0
    }
}
+
/// An iterator over a slice in (non-overlapping) chunks (`N` elements at a
/// time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`array_chunks`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_chunks)]
///
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.array_chunks::<2>();
/// ```
///
/// [`array_chunks`]: ../../std/primitive.slice.html#method.array_chunks
/// [`remainder`]: ArrayChunks::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunks<'a, T: 'a, const N: usize> {
    /// Iterator over the slice reinterpreted as `[T; N]` arrays.
    iter: Iter<'a, [T; N]>,
    /// The short tail (`len % N` elements) that is never yielded.
    rem: &'a [T],
}
+
impl<'a, T, const N: usize> ArrayChunks<'a, T, N> {
    #[inline]
    pub(super) fn new(slice: &'a [T]) -> Self {
        // `as_chunks` splits the slice into whole `[T; N]` arrays plus the tail.
        let (array_slice, rem) = slice.as_chunks();
        Self { iter: array_slice.iter(), rem }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `N-1`
    /// elements.
    #[unstable(feature = "array_chunks", issue = "74985")]
    pub fn remainder(&self) -> &'a [T] {
        self.rem
    }
}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[unstable(feature = "array_chunks", issue = "74985")]
+impl<T, const N: usize> Clone for ArrayChunks<'_, T, N> {
+ fn clone(&self) -> Self {
+ ArrayChunks { iter: self.iter.clone(), rem: self.rem }
+ }
+}
+
// Every method simply forwards to the inner `Iter<'a, [T; N]>`.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunks<'a, T, N> {
    type Item = &'a [T; N];

    #[inline]
    fn next(&mut self) -> Option<&'a [T; N]> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn count(self) -> usize {
        self.iter.count()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth(n)
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        self.iter.last()
    }

    unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a [T; N] {
        // SAFETY: The safety guarantees of `__iterator_get_unchecked` are
        // transferred to the caller.
        unsafe { self.iter.__iterator_get_unchecked(i) }
    }
}
+
// Back-to-front iteration also forwards to the inner `Iter`.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunks<'a, T, N> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T; N]> {
        self.iter.next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth_back(n)
    }
}
+
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunks<'_, T, N> {
    // Forwarded to the inner `Iter`.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}
+
// Marker trait impls for `ArrayChunks`.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunks<'_, T, N> {}

#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunks<'_, T, N> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunks<'a, T, N> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) mutable chunks (`N` elements
/// at a time), starting at the beginning of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `N-1` elements will be omitted but can be retrieved from
/// the [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`array_chunks_mut`] method on [slices].
///
/// # Example
///
/// ```
/// #![feature(array_chunks)]
///
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.array_chunks_mut::<2>();
/// ```
///
/// [`array_chunks_mut`]: ../../std/primitive.slice.html#method.array_chunks_mut
/// [`into_remainder`]: ArrayChunksMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
pub struct ArrayChunksMut<'a, T: 'a, const N: usize> {
    /// Iterator over the slice reinterpreted as `[T; N]` arrays.
    iter: IterMut<'a, [T; N]>,
    /// The short tail (`len % N` elements) that is never yielded.
    rem: &'a mut [T],
}
+
impl<'a, T, const N: usize> ArrayChunksMut<'a, T, N> {
    #[inline]
    pub(super) fn new(slice: &'a mut [T]) -> Self {
        // `as_chunks_mut` splits the slice into whole `[T; N]` arrays plus the tail.
        let (array_slice, rem) = slice.as_chunks_mut();
        Self { iter: array_slice.iter_mut(), rem }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `N-1`
    /// elements.
    #[unstable(feature = "array_chunks", issue = "74985")]
    pub fn into_remainder(self) -> &'a mut [T] {
        self.rem
    }
}
+
// Every method simply forwards to the inner `IterMut<'a, [T; N]>`.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> Iterator for ArrayChunksMut<'a, T, N> {
    type Item = &'a mut [T; N];

    #[inline]
    fn next(&mut self) -> Option<&'a mut [T; N]> {
        self.iter.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }

    #[inline]
    fn count(self) -> usize {
        self.iter.count()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth(n)
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        self.iter.last()
    }

    unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> &'a mut [T; N] {
        // SAFETY: The safety guarantees of `__iterator_get_unchecked` are transferred to
        // the caller.
        unsafe { self.iter.__iterator_get_unchecked(i) }
    }
}
+
// Back-to-front iteration also forwards to the inner `IterMut`.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<'a, T, const N: usize> DoubleEndedIterator for ArrayChunksMut<'a, T, N> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T; N]> {
        self.iter.next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.iter.nth_back(n)
    }
}
+
// Marker trait impls for `ArrayChunksMut`.
#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> ExactSizeIterator for ArrayChunksMut<'_, T, N> {
    // Forwarded to the inner `IterMut`.
    fn is_empty(&self) -> bool {
        self.iter.is_empty()
    }
}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, const N: usize> TrustedLen for ArrayChunksMut<'_, T, N> {}

#[unstable(feature = "array_chunks", issue = "74985")]
impl<T, const N: usize> FusedIterator for ArrayChunksMut<'_, T, N> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "array_chunks", issue = "74985")]
unsafe impl<'a, T, const N: usize> TrustedRandomAccess for ArrayChunksMut<'a, T, N> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks(2);
/// ```
///
/// [`rchunks`]: ../../std/primitive.slice.html#method.rchunks
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunks<'a, T: 'a> {
    /// The part of the slice that has not been yielded yet.
    v: &'a [T],
    /// Requested chunk length; the chunk at the uneven front may be shorter.
    chunk_size: usize,
}
+
+impl<'a, T: 'a> RChunks<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rchunks", since = "1.31.0")]
+impl<T> Clone for RChunks<'_, T> {
+ fn clone(&self) -> Self {
+ RChunks { v: self.v, chunk_size: self.chunk_size }
+ }
+}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunks<'a, T> {
    type Item = &'a [T];

    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // Chunks come off the back; the front chunk may be shorter.
            let chunksz = cmp::min(self.v.len(), self.chunk_size);
            let (fst, snd) = self.v.split_at(self.v.len() - chunksz);
            self.v = fst;
            Some(snd)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Number of whole chunks, plus one if a non-empty remainder exists.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        // `end` is the offset from the back of the n-th chunk's end; overflow
        // in the multiplication means `n` is certainly out of range.
        let (end, overflow) = n.overflowing_mul(self.chunk_size);
        if end >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Can't underflow because of the check above
            let end = self.v.len() - end;
            // The chunk nearest the front may be shorter than `chunk_size`.
            let start = match end.checked_sub(self.chunk_size) {
                Some(sum) => sum,
                None => 0,
            };
            let nth = &self.v[start..end];
            self.v = &self.v[0..start];
            Some(nth)
        }
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // The last chunk yielded is the (possibly short) front chunk.
            let rem = self.v.len() % self.chunk_size;
            let end = if rem == 0 { self.chunk_size } else { rem };
            Some(&self.v[0..end])
        }
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let end = self.v.len() - idx * self.chunk_size;
        // Clamp the chunk start to the front of the slice.
        let start = match end.checked_sub(self.chunk_size) {
            None => 0,
            Some(start) => start,
        };
        // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunks<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The back of this iterator is the slice's front; that chunk is
            // the remainder, unless the length divides evenly.
            let remainder = self.v.len() % self.chunk_size;
            let chunksz = if remainder != 0 { remainder } else { self.chunk_size };
            let (fst, snd) = self.v.split_at(chunksz);
            self.v = snd;
            Some(fst)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // can't underflow because `n < len`
            let offset_from_end = (len - 1 - n) * self.chunk_size;
            let end = self.v.len() - offset_from_end;
            // Clamp to the slice front: the frontmost chunk may be short.
            let start = end.saturating_sub(self.chunk_size);
            let nth_back = &self.v[start..end];
            self.v = &self.v[end..];
            Some(nth_back)
        }
    }
}
+
// Marker trait impls for `RChunks`, mirroring those of `Chunks`.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunks<'_, T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunks<'_, T> {}

#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunks<'_, T> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last slice
/// of the iteration will be the remainder.
///
/// This struct is created by the [`rchunks_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_mut(2);
/// ```
///
/// [`rchunks_mut`]: ../../std/primitive.slice.html#method.rchunks_mut
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksMut<'a, T: 'a> {
    /// The part of the slice that has not been yielded yet.
    v: &'a mut [T],
    /// Requested chunk length; the chunk at the uneven front may be shorter.
    chunk_size: usize,
}
+
+impl<'a, T: 'a> RChunksMut<'a, T> {
+ #[inline]
+ pub(super) fn new(slice: &'a mut [T], size: usize) -> Self {
+ Self { v: slice, chunk_size: size }
+ }
+}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksMut<'a, T> {
    type Item = &'a mut [T];

    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // Chunks come off the back; the front chunk may be shorter.
            let sz = cmp::min(self.v.len(), self.chunk_size);
            // Move the slice out of `self` so the split borrows for `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (head, tail) = tmp.split_at_mut(tmp_len - sz);
            self.v = head;
            Some(tail)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        if self.v.is_empty() {
            (0, Some(0))
        } else {
            // Number of whole chunks, plus one if a non-empty remainder exists.
            let n = self.v.len() / self.chunk_size;
            let rem = self.v.len() % self.chunk_size;
            let n = if rem > 0 { n + 1 } else { n };
            (n, Some(n))
        }
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
        // `end` is the offset from the back of the n-th chunk's end; overflow
        // in the multiplication means `n` is certainly out of range.
        let (end, overflow) = n.overflowing_mul(self.chunk_size);
        if end >= self.v.len() || overflow {
            self.v = &mut [];
            None
        } else {
            // Can't underflow because of the check above
            let end = self.v.len() - end;
            // The chunk nearest the front may be shorter than `chunk_size`.
            let start = match end.checked_sub(self.chunk_size) {
                Some(sum) => sum,
                None => 0,
            };
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(start);
            let (nth, _) = tail.split_at_mut(end - start);
            self.v = head;
            Some(nth)
        }
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        if self.v.is_empty() {
            None
        } else {
            // The last chunk yielded is the (possibly short) front chunk.
            let rem = self.v.len() % self.chunk_size;
            let end = if rem == 0 { self.chunk_size } else { rem };
            Some(&mut self.v[0..end])
        }
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let end = self.v.len() - idx * self.chunk_size;
        // Clamp the chunk start to the front of the slice.
        let start = match end.checked_sub(self.chunk_size) {
            None => 0,
            Some(start) => start,
        };
        // SAFETY: see comments for `RChunks::__iterator_get_unchecked` and
        // `ChunksMut::__iterator_get_unchecked`
        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.v.is_empty() {
            None
        } else {
            // The back of this iterator is the slice's front; that chunk is
            // the remainder, unless the length divides evenly.
            let remainder = self.v.len() % self.chunk_size;
            let sz = if remainder != 0 { remainder } else { self.chunk_size };
            // Move the slice out of `self` to split it with lifetime `'a`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(sz);
            self.v = tail;
            Some(head)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &mut [];
            None
        } else {
            // can't underflow because `n < len`
            let offset_from_end = (len - 1 - n) * self.chunk_size;
            let end = self.v.len() - offset_from_end;
            // Clamp to the slice front: the frontmost chunk may be short.
            let start = end.saturating_sub(self.chunk_size);
            let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
            let (_, nth_back) = tmp.split_at_mut(start);
            self.v = tail;
            Some(nth_back)
        }
    }
}
+
// Marker trait impls for `RChunksMut`, mirroring those of `RChunks`.
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksMut<'_, T> {}

#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksMut<'_, T> {}

#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksMut<'_, T> {}

// Reports that indexed access has no side effects.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a
/// time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last
/// up to `chunk_size-1` elements will be omitted but can be retrieved from
/// the [`remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact`] method on [slices].
///
/// # Example
///
/// ```
/// let slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_exact(2);
/// ```
///
/// [`rchunks_exact`]: ../../std/primitive.slice.html#method.rchunks_exact
/// [`remainder`]: RChunksExact::remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExact<'a, T: 'a> {
    /// The part of the slice that still yields whole chunks.
    v: &'a [T],
    /// The short head (`len % chunk_size` elements), split off up front.
    rem: &'a [T],
    chunk_size: usize,
}
+
impl<'a, T> RChunksExact<'a, T> {
    #[inline]
    pub(super) fn new(slice: &'a [T], chunk_size: usize) -> Self {
        // Split off the short head up front: since chunks are taken from the
        // back, the leftover elements sit at the front of the slice.
        let rem = slice.len() % chunk_size;
        // SAFETY: 0 <= rem <= slice.len() by construction above
        let (fst, snd) = unsafe { slice.split_at_unchecked(rem) };
        Self { v: snd, rem: fst, chunk_size }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    #[stable(feature = "rchunks", since = "1.31.0")]
    pub fn remainder(&self) -> &'a [T] {
        self.rem
    }
}
+
// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
// (A manual impl avoids the `T: Clone` bound that `derive` would add,
// even though only the shared references are being copied — see #26925.)
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Clone for RChunksExact<'a, T> {
    fn clone(&self) -> RChunksExact<'a, T> {
        RChunksExact { v: self.v, rem: self.rem, chunk_size: self.chunk_size }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExact<'a, T> {
    type Item = &'a [T];

    #[inline]
    fn next(&mut self) -> Option<&'a [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Chunks are yielded from the back of `v` towards the front.
            let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
            self.v = fst;
            Some(snd)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // `v.len()` is always a multiple of `chunk_size`, so this is exact.
        let n = self.v.len() / self.chunk_size;
        (n, Some(n))
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        let (end, overflow) = n.overflowing_mul(self.chunk_size);
        if end >= self.v.len() || overflow {
            self.v = &[];
            None
        } else {
            // Drop the `n` skipped chunks off the back, then yield the next.
            let (fst, _) = self.v.split_at(self.v.len() - end);
            self.v = fst;
            self.next()
        }
    }

    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let end = self.v.len() - idx * self.chunk_size;
        let start = end - self.chunk_size;
        // SAFETY: mostly identical to `Chunks::__iterator_get_unchecked`.
        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExact<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // The "back" of this reversed iterator is the front of `v`.
            let (fst, snd) = self.v.split_at(self.chunk_size);
            self.v = snd;
            Some(fst)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &[];
            None
        } else {
            // now that we know that `n` corresponds to a chunk,
            // none of these operations can underflow/overflow
            let offset = (len - n) * self.chunk_size;
            let start = self.v.len() - offset;
            let end = start + self.chunk_size;
            let nth_back = &self.v[start..end];
            // Everything before `end` (in iteration order: after the
            // returned chunk) is consumed.
            self.v = &self.v[end..];
            Some(nth_back)
        }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> {
    fn is_empty(&self) -> bool {
        // `v` only ever shrinks by whole chunks, so it is empty exactly
        // when no chunks remain.
        self.v.is_empty()
    }
}
+
// SAFETY: relies on `size_hint` above being exact.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExact<'_, T> {}

#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExact<'_, T> {}

#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
    // Accessing a chunk is a plain slice read with no side effects.
    fn may_have_side_effect() -> bool {
        false
    }
}
+
/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size`
/// elements at a time), starting at the end of the slice.
///
/// When the slice len is not evenly divided by the chunk size, the last up to
/// `chunk_size-1` elements will be omitted but can be retrieved from the
/// [`into_remainder`] function from the iterator.
///
/// This struct is created by the [`rchunks_exact_mut`] method on [slices].
///
/// # Example
///
/// ```
/// let mut slice = ['l', 'o', 'r', 'e', 'm'];
/// let iter = slice.rchunks_exact_mut(2);
/// ```
///
/// [`rchunks_exact_mut`]: ../../std/primitive.slice.html#method.rchunks_exact_mut
/// [`into_remainder`]: RChunksExactMut::into_remainder
/// [slices]: ../../std/primitive.slice.html
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
pub struct RChunksExactMut<'a, T: 'a> {
    // The part still to be iterated; its length is always a multiple of
    // `chunk_size` (see `RChunksExactMut::new`).
    v: &'a mut [T],
    // The front `len % chunk_size` elements of the original slice, never
    // yielded as a chunk.
    rem: &'a mut [T],
    chunk_size: usize,
}
+
impl<'a, T> RChunksExactMut<'a, T> {
    #[inline]
    pub(super) fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
        // Chunks are taken from the back, so the leftover `len % chunk_size`
        // elements are the *front* of the slice; set them aside as `rem` and
        // keep the exactly-divisible tail in `v`.
        let rem = slice.len() % chunk_size;
        // SAFETY: 0 <= rem <= slice.len() by construction above
        let (fst, snd) = unsafe { slice.split_at_mut_unchecked(rem) };
        Self { v: snd, rem: fst, chunk_size }
    }

    /// Returns the remainder of the original slice that is not going to be
    /// returned by the iterator. The returned slice has at most `chunk_size-1`
    /// elements.
    #[stable(feature = "rchunks", since = "1.31.0")]
    pub fn into_remainder(self) -> &'a mut [T] {
        self.rem
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> Iterator for RChunksExactMut<'a, T> {
    type Item = &'a mut [T];

    #[inline]
    fn next(&mut self) -> Option<&'a mut [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Move `v` out of `self` so the returned chunk can borrow the
            // underlying slice for `'a` rather than borrowing `self`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (head, tail) = tmp.split_at_mut(tmp_len - self.chunk_size);
            self.v = head;
            Some(tail)
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // `v.len()` is always a multiple of `chunk_size`, so this is exact.
        let n = self.v.len() / self.chunk_size;
        (n, Some(n))
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
        let (end, overflow) = n.overflowing_mul(self.chunk_size);
        if end >= self.v.len() || overflow {
            self.v = &mut [];
            None
        } else {
            // Drop the `n` skipped chunks off the back, then yield the next.
            let tmp = mem::replace(&mut self.v, &mut []);
            let tmp_len = tmp.len();
            let (fst, _) = tmp.split_at_mut(tmp_len - end);
            self.v = fst;
            self.next()
        }
    }

    #[inline]
    fn last(mut self) -> Option<Self::Item> {
        self.next_back()
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
        let end = self.v.len() - idx * self.chunk_size;
        let start = end - self.chunk_size;
        // SAFETY: see comments for `RChunksMut::__iterator_get_unchecked`.
        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut [T]> {
        if self.v.len() < self.chunk_size {
            None
        } else {
            // Move `v` out of `self` so the returned chunk can borrow the
            // underlying slice for `'a` rather than borrowing `self`.
            let tmp = mem::replace(&mut self.v, &mut []);
            let (head, tail) = tmp.split_at_mut(self.chunk_size);
            self.v = tail;
            Some(head)
        }
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        let len = self.len();
        if n >= len {
            self.v = &mut [];
            None
        } else {
            // now that we know that `n` corresponds to a chunk,
            // none of these operations can underflow/overflow
            let offset = (len - n) * self.chunk_size;
            let start = self.v.len() - offset;
            let end = start + self.chunk_size;
            let (tmp, tail) = mem::replace(&mut self.v, &mut []).split_at_mut(end);
            let (_, nth_back) = tmp.split_at_mut(start);
            // Everything before `end` (in iteration order: after the
            // returned chunk) is consumed.
            self.v = tail;
            Some(nth_back)
        }
    }
}
+
#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> ExactSizeIterator for RChunksExactMut<'_, T> {
    fn is_empty(&self) -> bool {
        // `v` only ever shrinks by whole chunks, so it is empty exactly
        // when no chunks remain.
        self.v.is_empty()
    }
}
+
// SAFETY: relies on `size_hint` above being exact.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T> TrustedLen for RChunksExactMut<'_, T> {}

#[stable(feature = "rchunks", since = "1.31.0")]
impl<T> FusedIterator for RChunksExactMut<'_, T> {}

#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
    // Accessing a chunk is a plain slice access with no side effects.
    fn may_have_side_effect() -> bool {
        false
    }
}

// Plain element access for `Iter`/`IterMut` has no side effects either.
#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}

#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
    fn may_have_side_effect() -> bool {
        false
    }
}
--- /dev/null
+//! Macros used by iterators of slice.
+
// Inlining is_empty and len makes a huge performance difference
macro_rules! is_empty {
    // The way we encode the length of a ZST iterator, this works both for ZST
    // and non-ZST.
    // (For ZSTs, `end` holds `ptr + len` computed with wrapping byte offsets
    // — see `len!` and `zst_shrink!` — so pointer equality still means
    // "no elements left".)
    ($self: ident) => {
        $self.ptr.as_ptr() as *const T == $self.end
    };
}
+
// To get rid of some bounds checks (see `position`), we compute the length in a somewhat
// unexpected way. (Tested by `codegen/slice-position-bounds-check`.)
//
// For ZST iterators the `end` field encodes the length directly (see
// `is_empty!`), which is why the ZST branch uses plain wrapping arithmetic
// on the raw addresses.
macro_rules! len {
    ($self: ident) => {{
        #![allow(unused_unsafe)] // we're sometimes used within an unsafe block

        let start = $self.ptr;
        let size = size_from_ptr(start.as_ptr());
        if size == 0 {
            // This _cannot_ use `unchecked_sub` because we depend on wrapping
            // to represent the length of long ZST slice iterators.
            ($self.end as usize).wrapping_sub(start.as_ptr() as usize)
        } else {
            // We know that `start <= end`, so can do better than `offset_from`,
            // which needs to deal in signed. By setting appropriate flags here
            // we can tell LLVM this, which helps it remove bounds checks.
            // SAFETY: By the type invariant, `start <= end`
            let diff = unsafe { unchecked_sub($self.end as usize, start.as_ptr() as usize) };
            // By also telling LLVM that the pointers are apart by an exact
            // multiple of the type size, it can optimize `len() == 0` down to
            // `start == end` instead of `(end - start) < size`.
            // SAFETY: By the type invariant, the pointers are aligned so the
            // distance between them must be a multiple of pointee size
            unsafe { exact_div(diff, size) }
        }
    }};
}
+
// The shared definition of the `Iter` and `IterMut` iterators
//
// `$name` is the iterator type, `$ptr` its raw pointer type, `$raw_mut`
// selects `const`/`mut` in pointer casts, `$mut_` optionally expands to
// `mut` for the yielded references, and `$extra` is spliced verbatim into
// the `Iterator` impl.
macro_rules! iterator {
    (
        struct $name:ident -> $ptr:ty,
        $elem:ty,
        $raw_mut:tt,
        {$( $mut_:tt )?},
        {$($extra:tt)*}
    ) => {
        // Returns the first element and moves the start of the iterator forwards by 1.
        // Greatly improves performance compared to an inlined function. The iterator
        // must not be empty.
        macro_rules! next_unchecked {
            ($self: ident) => {& $( $mut_ )? *$self.post_inc_start(1)}
        }

        // Returns the last element and moves the end of the iterator backwards by 1.
        // Greatly improves performance compared to an inlined function. The iterator
        // must not be empty.
        macro_rules! next_back_unchecked {
            ($self: ident) => {& $( $mut_ )? *$self.pre_dec_end(1)}
        }

        // Shrinks the iterator when T is a ZST, by moving the end of the iterator
        // backwards by `n`. `n` must not exceed `self.len()`.
        macro_rules! zst_shrink {
            ($self: ident, $n: ident) => {
                $self.end = ($self.end as * $raw_mut u8).wrapping_offset(-$n) as * $raw_mut T;
            }
        }

        impl<'a, T> $name<'a, T> {
            // Helper function for creating a slice from the iterator.
            #[inline(always)]
            fn make_slice(&self) -> &'a [T] {
                // SAFETY: the iterator was created from a slice with pointer
                // `self.ptr` and length `len!(self)`. This guarantees that all
                // the prerequisites for `from_raw_parts` are fulfilled.
                unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
            }

            // Helper function for moving the start of the iterator forwards by `offset` elements,
            // returning the old start.
            // Unsafe because the offset must not exceed `self.len()`.
            #[inline(always)]
            unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
                if mem::size_of::<T>() == 0 {
                    // For ZSTs `ptr` never moves; the length lives in `end`
                    // (see `is_empty!`/`len!`), so shrink from that side.
                    zst_shrink!(self, offset);
                    self.ptr.as_ptr()
                } else {
                    let old = self.ptr.as_ptr();
                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
                    // so this new pointer is inside `self` and thus guaranteed to be non-null.
                    self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
                    old
                }
            }

            // Helper function for moving the end of the iterator backwards by `offset` elements,
            // returning the new end.
            // Unsafe because the offset must not exceed `self.len()`.
            #[inline(always)]
            unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
                if mem::size_of::<T>() == 0 {
                    // See `post_inc_start`: for ZSTs only `end` tracks length.
                    zst_shrink!(self, offset);
                    self.ptr.as_ptr()
                } else {
                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
                    // which is guaranteed to not overflow an `isize`. Also, the resulting pointer
                    // is in bounds of `slice`, which fulfills the other requirements for `offset`.
                    self.end = unsafe { self.end.offset(-offset) };
                    self.end
                }
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<T> ExactSizeIterator for $name<'_, T> {
            #[inline(always)]
            fn len(&self) -> usize {
                len!(self)
            }

            #[inline(always)]
            fn is_empty(&self) -> bool {
                is_empty!(self)
            }
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, T> Iterator for $name<'a, T> {
            type Item = $elem;

            #[inline]
            fn next(&mut self) -> Option<$elem> {
                // could be implemented with slices, but this avoids bounds checks

                // SAFETY: `assume` calls are safe since a slice's start pointer
                // must be non-null, and slices over non-ZSTs must also have a
                // non-null end pointer. The call to `next_unchecked!` is safe
                // since we check if the iterator is empty first.
                unsafe {
                    assume(!self.ptr.as_ptr().is_null());
                    if mem::size_of::<T>() != 0 {
                        assume(!self.end.is_null());
                    }
                    if is_empty!(self) {
                        None
                    } else {
                        Some(next_unchecked!(self))
                    }
                }
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                let exact = len!(self);
                (exact, Some(exact))
            }

            #[inline]
            fn count(self) -> usize {
                len!(self)
            }

            #[inline]
            fn nth(&mut self, n: usize) -> Option<$elem> {
                if n >= len!(self) {
                    // This iterator is now empty.
                    if mem::size_of::<T>() == 0 {
                        // We have to do it this way as `ptr` may never be 0, but `end`
                        // could be (due to wrapping).
                        self.end = self.ptr.as_ptr();
                    } else {
                        // SAFETY: end can't be 0 if T isn't ZST because ptr isn't 0 and end >= ptr
                        unsafe {
                            self.ptr = NonNull::new_unchecked(self.end as *mut T);
                        }
                    }
                    return None;
                }
                // SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
                unsafe {
                    self.post_inc_start(n as isize);
                    Some(next_unchecked!(self))
                }
            }

            #[inline]
            fn last(mut self) -> Option<$elem> {
                self.next_back()
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn for_each<F>(mut self, mut f: F)
            where
                Self: Sized,
                F: FnMut(Self::Item),
            {
                while let Some(x) = self.next() {
                    f(x);
                }
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn all<F>(&mut self, mut f: F) -> bool
            where
                Self: Sized,
                F: FnMut(Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if !f(x) {
                        return false;
                    }
                }
                true
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn any<F>(&mut self, mut f: F) -> bool
            where
                Self: Sized,
                F: FnMut(Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if f(x) {
                        return true;
                    }
                }
                false
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
            where
                Self: Sized,
                P: FnMut(&Self::Item) -> bool,
            {
                while let Some(x) = self.next() {
                    if predicate(&x) {
                        return Some(x);
                    }
                }
                None
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile.
            #[inline]
            fn find_map<B, F>(&mut self, mut f: F) -> Option<B>
            where
                Self: Sized,
                F: FnMut(Self::Item) -> Option<B>,
            {
                while let Some(x) = self.next() {
                    if let Some(y) = f(x) {
                        return Some(y);
                    }
                }
                None
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile. Also, the `assume` avoids a bounds check.
            #[inline]
            #[rustc_inherit_overflow_checks]
            fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
                Self: Sized,
                P: FnMut(Self::Item) -> bool,
            {
                let n = len!(self);
                let mut i = 0;
                while let Some(x) = self.next() {
                    if predicate(x) {
                        // SAFETY: we are guaranteed to be in bounds by the loop invariant:
                        // when `i >= n`, `self.next()` returns `None` and the loop breaks.
                        unsafe { assume(i < n) };
                        return Some(i);
                    }
                    i += 1;
                }
                None
            }

            // We override the default implementation, which uses `try_fold`,
            // because this simple implementation generates less LLVM IR and is
            // faster to compile. Also, the `assume` avoids a bounds check.
            #[inline]
            fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
                P: FnMut(Self::Item) -> bool,
                Self: Sized + ExactSizeIterator + DoubleEndedIterator
            {
                let n = len!(self);
                let mut i = n;
                while let Some(x) = self.next_back() {
                    i -= 1;
                    if predicate(x) {
                        // SAFETY: `i` must be lower than `n` since it starts at `n`
                        // and is only decreasing.
                        unsafe { assume(i < n) };
                        return Some(i);
                    }
                }
                None
            }

            #[doc(hidden)]
            unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
                // SAFETY: the caller must guarantee that `i` is in bounds of
                // the underlying slice, so `i` cannot overflow an `isize`, and
                // the returned reference is guaranteed to refer to an element
                // of the slice and thus guaranteed to be valid.
                //
                // Also note that the caller also guarantees that we're never
                // called with the same index again, and that no other methods
                // that will access this subslice are called, so it is valid
                // for the returned reference to be mutable in the case of
                // `IterMut`
                unsafe { & $( $mut_ )? * self.ptr.as_ptr().add(idx) }
            }

            $($extra)*
        }

        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, T> DoubleEndedIterator for $name<'a, T> {
            #[inline]
            fn next_back(&mut self) -> Option<$elem> {
                // could be implemented with slices, but this avoids bounds checks

                // SAFETY: `assume` calls are safe since a slice's start pointer must be non-null,
                // and slices over non-ZSTs must also have a non-null end pointer.
                // The call to `next_back_unchecked!` is safe since we check if the iterator is
                // empty first.
                unsafe {
                    assume(!self.ptr.as_ptr().is_null());
                    if mem::size_of::<T>() != 0 {
                        assume(!self.end.is_null());
                    }
                    if is_empty!(self) {
                        None
                    } else {
                        Some(next_back_unchecked!(self))
                    }
                }
            }

            #[inline]
            fn nth_back(&mut self, n: usize) -> Option<$elem> {
                if n >= len!(self) {
                    // This iterator is now empty.
                    // (Works for ZSTs too: length is encoded in `end`.)
                    self.end = self.ptr.as_ptr();
                    return None;
                }
                // SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
                unsafe {
                    self.pre_dec_end(n as isize);
                    Some(next_back_unchecked!(self))
                }
            }
        }

        #[stable(feature = "fused", since = "1.26.0")]
        impl<T> FusedIterator for $name<'_, T> {}

        // SAFETY: `size_hint` above is exact (it reports `len!`).
        #[unstable(feature = "trusted_len", issue = "37572")]
        unsafe impl<T> TrustedLen for $name<'_, T> {}
    }
}
+
// Implements `Iterator` (and `FusedIterator`) for a predicate-carrying
// wrapper type `$name` by forwarding `next`/`size_hint` to its `inner` field.
macro_rules! forward_iterator {
    ($name:ident: $elem:ident, $iter_of:ty) => {
        #[stable(feature = "rust1", since = "1.0.0")]
        impl<'a, $elem, P> Iterator for $name<'a, $elem, P>
        where
            P: FnMut(&T) -> bool,
        {
            type Item = $iter_of;

            #[inline]
            fn next(&mut self) -> Option<$iter_of> {
                self.inner.next()
            }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) {
                self.inner.size_hint()
            }
        }

        #[stable(feature = "fused", since = "1.26.0")]
        impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> where P: FnMut(&T) -> bool {}
    };
}
--- /dev/null
+// Original implementation taken from rust-memchr.
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+// ignore-tidy-undocumented-unsafe
+
+use crate::cmp;
+use crate::mem;
+
// Bit patterns with every byte set to 0x01 / 0x80; used by
// `contains_zero_byte` below.
const LO_U64: u64 = 0x0101010101010101;
const HI_U64: u64 = 0x8080808080808080;

// Use truncation.
const LO_USIZE: usize = LO_U64 as usize;
const HI_USIZE: usize = HI_U64 as usize;
const USIZE_BYTES: usize = mem::size_of::<usize>();
+
/// Returns `true` if `x` contains any zero byte.
///
/// From *Matters Computational*, J. Arndt:
///
/// "The idea is to subtract one from each of the bytes and then look for
/// bytes where the borrow propagated all the way to the most significant
/// bit."
#[inline]
fn contains_zero_byte(x: usize) -> bool {
    // `x - LO_USIZE` borrows into a byte's high bit iff that byte was zero;
    // `& !x` discards bytes whose high bit was already set in `x`.
    x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
+
#[cfg(target_pointer_width = "16")]
#[inline]
fn repeat_byte(b: u8) -> usize {
    // On 16-bit targets there are only two byte positions to fill.
    (b as usize) << 8 | b as usize
}
+
#[cfg(not(target_pointer_width = "16"))]
#[inline]
fn repeat_byte(b: u8) -> usize {
    // Broadcast `b` into every byte of a `usize`. Assembling the value from
    // `size_of::<usize>()` identical bytes is equivalent to the classic
    // `b * (usize::MAX / 255)` multiplication trick.
    usize::from_ne_bytes([b; mem::size_of::<usize>()])
}
+
+/// Returns the first index matching the byte `x` in `text`.
+#[inline]
+pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Fast path for small slices
+ if text.len() < 2 * USIZE_BYTES {
+ return text.iter().position(|elt| *elt == x);
+ }
+
+ memchr_general_case(x, text)
+}
+
fn memchr_general_case(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts
    // - unaligned initial part, before the first word aligned address in text
    // - body, scan by 2 words at a time
    // - the last remaining part, < 2 word size

    // search up to an aligned boundary
    let len = text.len();
    let ptr = text.as_ptr();
    let mut offset = ptr.align_offset(USIZE_BYTES);

    if offset > 0 {
        offset = cmp::min(offset, len);
        if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
            return Some(index);
        }
    }

    // search the body of the text
    let repeated_x = repeat_byte(x);
    // The caller (`memchr`) only calls this with `len >= 2 * USIZE_BYTES`,
    // so the subtraction below cannot underflow.
    while offset <= len - 2 * USIZE_BYTES {
        // SAFETY: `offset` is word-aligned (it starts at `align_offset` and
        // grows in word-sized steps) and the loop condition keeps both
        // word-sized reads within the bounds of `text`.
        unsafe {
            let u = *(ptr.add(offset) as *const usize);
            let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);

            // break if there is a matching byte
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset += USIZE_BYTES * 2;
    }

    // Find the byte after the point the body loop stopped.
    text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
}
+
/// Returns the last index matching the byte `x` in `text`.
pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
    // Scan for a single byte value by reading two `usize` words at a time.
    //
    // Split `text` in three parts:
    // - unaligned tail, after the last word aligned address in text,
    // - body, scanned by 2 words at a time,
    // - the first remaining bytes, < 2 word size.
    let len = text.len();
    let ptr = text.as_ptr();
    type Chunk = usize;

    let (min_aligned_offset, max_aligned_offset) = {
        // We call this just to obtain the length of the prefix and suffix.
        // In the middle we always process two chunks at once.
        // SAFETY: transmuting byte data to `usize` chunks is valid for any
        // bit pattern, which is all `align_to` requires here.
        let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() };
        (prefix.len(), len - suffix.len())
    };

    // First search the unaligned tail linearly (from the back).
    let mut offset = max_aligned_offset;
    if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
        return Some(offset + index);
    }

    // Search the body of the text, make sure we don't cross min_aligned_offset.
    // offset is always aligned, so just testing `>` is sufficient and avoids possible
    // overflow.
    let repeated_x = repeat_byte(x);
    let chunk_bytes = mem::size_of::<Chunk>();

    while offset > min_aligned_offset {
        // SAFETY: `offset` stays on `(Chunk, Chunk)` boundaries of the
        // aligned middle section computed above, so both reads are aligned
        // and within the bounds of `text`.
        unsafe {
            let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk);
            let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk);

            // Break if there is a matching byte.
            let zu = contains_zero_byte(u ^ repeated_x);
            let zv = contains_zero_byte(v ^ repeated_x);
            if zu || zv {
                break;
            }
        }
        offset -= 2 * chunk_bytes;
    }

    // Find the byte before the point the body loop stopped.
    text[..offset].iter().rposition(|elt| *elt == x)
}
--- /dev/null
+// ignore-tidy-filelength
+
+//! Slice management and manipulation.
+//!
+//! For more details see [`std::slice`].
+//!
+//! [`std::slice`]: ../../std/slice/index.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::marker::Copy;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ops::{FnMut, Range, RangeBounds};
+use crate::option::Option;
+use crate::option::Option::{None, Some};
+use crate::ptr;
+use crate::result::Result;
+use crate::result::Result::{Err, Ok};
+
+#[unstable(
+ feature = "slice_internals",
+ issue = "none",
+ reason = "exposed from core to be reused in std; use the memchr crate"
+)]
+/// Pure rust memchr implementation, taken from rust-memchr
+pub mod memchr;
+
+mod ascii;
+mod cmp;
+pub(crate) mod index;
+mod iter;
+mod raw;
+mod rotate;
+mod sort;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Chunks, ChunksMut, Windows};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Iter, IterMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplitN, RSplitNMut, Split, SplitMut, SplitN, SplitNMut};
+
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub use iter::{RSplit, RSplitMut};
+
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+pub use iter::{ChunksExact, ChunksExactMut};
+
+#[stable(feature = "rchunks", since = "1.31.0")]
+pub use iter::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use iter::{ArrayChunks, ArrayChunksMut};
+
+#[unstable(feature = "array_windows", issue = "75027")]
+pub use iter::ArrayWindows;
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+pub use iter::{SplitInclusive, SplitInclusiveMut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use raw::{from_raw_parts, from_raw_parts_mut};
+
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub use raw::{from_mut, from_ref};
+
+// This function is public only because there is no other way to unit test heapsort.
+#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
+pub use sort::heapsort;
+
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+pub use index::SliceIndex;
+
+#[lang = "slice"]
+#[cfg(not(test))]
+impl<T> [T] {
    /// Returns the number of elements in the slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert_eq!(a.len(), 3);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_slice_len", since = "1.32.0")]
    #[inline]
    // SAFETY: const sound because we transmute out the length field as a usize (which it must be)
    #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_union))]
    #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_union))]
    pub const fn len(&self) -> usize {
        // SAFETY: this is safe because `&[T]` and `FatPtr<T>` have the same layout.
        // Only `std` can make this guarantee.
        // Reads the `len` field out of the raw fat-pointer representation.
        unsafe { crate::ptr::Repr { rust: self }.raw.len }
    }
+
    /// Returns `true` if the slice has a length of 0.
    ///
    /// # Examples
    ///
    /// ```
    /// let a = [1, 2, 3];
    /// assert!(!a.is_empty());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_slice_is_empty", since = "1.32.0")]
    #[inline]
    pub const fn is_empty(&self) -> bool {
        // Defined in terms of `len` so it works in `const` contexts too.
        self.len() == 0
    }
+
+ /// Returns the first element of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert_eq!(Some(&10), v.first());
+ ///
+ /// let w: &[i32] = &[];
+ /// assert_eq!(None, w.first());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn first(&self) -> Option<&T> {
+ if let [first, ..] = self { Some(first) } else { None }
+ }
+
    /// Returns a mutable reference to the first element of the slice, or `None` if it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &mut [0, 1, 2];
    ///
    /// if let Some(first) = x.first_mut() {
    ///     *first = 5;
    /// }
    /// assert_eq!(x, &[5, 1, 2]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn first_mut(&mut self) -> Option<&mut T> {
        if let [first, ..] = self { Some(first) } else { None }
    }
+
+ /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[0, 1, 2];
+ ///
+ /// if let Some((first, elements)) = x.split_first() {
+ /// assert_eq!(first, &0);
+ /// assert_eq!(elements, &[1, 2]);
+ /// }
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[inline]
+ pub fn split_first(&self) -> Option<(&T, &[T])> {
+ if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
+ }
+
+ /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some((first, elements)) = x.split_first_mut() {
+ /// *first = 3;
+ /// elements[0] = 4;
+ /// elements[1] = 5;
+ /// }
+ /// assert_eq!(x, &[3, 4, 5]);
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[inline]
+ pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
+ }
+
+ /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[0, 1, 2];
+ ///
+ /// if let Some((last, elements)) = x.split_last() {
+ /// assert_eq!(last, &2);
+ /// assert_eq!(elements, &[0, 1]);
+ /// }
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[inline]
+ pub fn split_last(&self) -> Option<(&T, &[T])> {
+ if let [init @ .., last] = self { Some((last, init)) } else { None }
+ }
+
+ /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [0, 1, 2];
+ ///
+ /// if let Some((last, elements)) = x.split_last_mut() {
+ /// *last = 3;
+ /// elements[0] = 4;
+ /// elements[1] = 5;
+ /// }
+ /// assert_eq!(x, &[4, 5, 3]);
+ /// ```
+ #[stable(feature = "slice_splits", since = "1.5.0")]
+ #[inline]
+ pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
+ if let [init @ .., last] = self { Some((last, init)) } else { None }
+ }
+
+ /// Returns the last element of the slice, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert_eq!(Some(&30), v.last());
+ ///
+ /// let w: &[i32] = &[];
+ /// assert_eq!(None, w.last());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn last(&self) -> Option<&T> {
+ if let [.., last] = self { Some(last) } else { None }
+ }
+
    /// Returns a mutable reference to the last item in the slice, or `None` if it is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &mut [0, 1, 2];
    ///
    /// if let Some(last) = x.last_mut() {
    ///     *last = 10;
    /// }
    /// assert_eq!(x, &[0, 1, 10]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn last_mut(&mut self) -> Option<&mut T> {
        if let [.., last] = self { Some(last) } else { None }
    }
+
    /// Returns a reference to an element or subslice depending on the type of
    /// index.
    ///
    /// - If given a position, returns a reference to the element at that
    ///   position or `None` if out of bounds.
    /// - If given a range, returns the subslice corresponding to that range,
    ///   or `None` if out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// let v = [10, 40, 30];
    /// assert_eq!(Some(&40), v.get(1));
    /// assert_eq!(Some(&[10, 40][..]), v.get(0..2));
    /// assert_eq!(None, v.get(3));
    /// assert_eq!(None, v.get(0..4));
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn get<I>(&self, index: I) -> Option<&I::Output>
    where
        I: SliceIndex<Self>,
    {
        // Checked indexing is delegated to the `SliceIndex` impl of the
        // index type (single positions and ranges alike).
        index.get(self)
    }
+
    /// Returns a mutable reference to an element or subslice depending on the
    /// type of index (see [`get`]) or `None` if the index is out of bounds.
    ///
    /// [`get`]: #method.get
    ///
    /// # Examples
    ///
    /// ```
    /// let x = &mut [0, 1, 2];
    ///
    /// if let Some(elem) = x.get_mut(1) {
    ///     *elem = 42;
    /// }
    /// assert_eq!(x, &[0, 42, 2]);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
    where
        I: SliceIndex<Self>,
    {
        // Checked indexing is delegated to the `SliceIndex` impl of the
        // index type (single positions and ranges alike).
        index.get_mut(self)
    }
+
+ /// Returns a reference to an element or subslice, without doing bounds
+ /// checking.
+ ///
+ /// For a safe alternative see [`get`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used.
+ ///
+ /// [`get`]: #method.get
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ ///
+ /// unsafe {
+ /// assert_eq!(x.get_unchecked(1), &2);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
+ where
+ I: SliceIndex<Self>,
+ {
+ // SAFETY: the caller must uphold the safety requirements for `get_unchecked`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &*index.get_unchecked(self) }
+ }
+
+ /// Returns a mutable reference to an element or subslice, without doing
+ /// bounds checking.
+ ///
+ /// For a safe alternative see [`get_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used.
+ ///
+ /// [`get_mut`]: #method.get_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ ///
+ /// unsafe {
+ /// let elem = x.get_unchecked_mut(1);
+ /// *elem = 13;
+ /// }
+ /// assert_eq!(x, &[1, 13, 4]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
+ where
+ I: SliceIndex<Self>,
+ {
+ // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
+ // the slice is dereferenceable because `self` is a safe reference.
+ // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
+ unsafe { &mut *index.get_unchecked_mut(self) }
+ }
+
+ /// Returns a raw pointer to the slice's buffer.
+ ///
+ /// The caller must ensure that the slice outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: #method.as_mut_ptr
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
+ #[inline]
+ pub const fn as_ptr(&self) -> *const T {
+ // Casting the `*const [T]` slice pointer down to a thin `*const T`
+ // yields the address of the first element.
+ self as *const [T] as *const T
+ }
+
+ /// Returns an unsafe mutable pointer to the slice's buffer.
+ ///
+ /// The caller must ensure that the slice outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ ///
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// *x_ptr.add(i) += 2;
+ /// }
+ /// }
+ /// assert_eq!(x, &[3, 4, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn as_mut_ptr(&mut self) -> *mut T {
+ // Mutable counterpart of `as_ptr`: cast the slice pointer down to a
+ // thin `*mut T` addressing the first element.
+ self as *mut [T] as *mut T
+ }
+
+ /// Returns the two raw pointers spanning the slice.
+ ///
+ /// The returned range is half-open, which means that the end pointer
+ /// points *one past* the last element of the slice. This way, an empty
+ /// slice is represented by two equal pointers, and the difference between
+ /// the two pointers represents the size of the slice.
+ ///
+ /// See [`as_ptr`] for warnings on using these pointers. The end pointer
+ /// requires extra caution, as it does not point to a valid element in the
+ /// slice.
+ ///
+ /// This function is useful for interacting with foreign interfaces which
+ /// use two pointers to refer to a range of elements in memory, as is
+ /// common in C++.
+ ///
+ /// It can also be useful to check if a pointer to an element refers to an
+ /// element of this slice:
+ ///
+ /// ```
+ /// let a = [1, 2, 3];
+ /// let x = &a[1] as *const _;
+ /// let y = &5 as *const _;
+ ///
+ /// assert!(a.as_ptr_range().contains(&x));
+ /// assert!(!a.as_ptr_range().contains(&y));
+ /// ```
+ ///
+ /// [`as_ptr`]: #method.as_ptr
+ #[stable(feature = "slice_ptr_range", since = "1.48.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn as_ptr_range(&self) -> Range<*const T> {
+ let start = self.as_ptr();
+ // SAFETY: The `add` here is safe, because:
+ //
+ // - Both pointers are part of the same object, as pointing directly
+ // past the object also counts.
+ //
+ // - The size of the slice is never larger than isize::MAX bytes, as
+ // noted here:
+ // - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447
+ // - https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ // - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety
+ // (This doesn't seem normative yet, but the very same assumption is
+ // made in many places, including the Index implementation of slices.)
+ //
+ // - There is no wrapping around involved, as slices do not wrap past
+ // the end of the address space.
+ //
+ // See the documentation of pointer::add.
+ let end = unsafe { start.add(self.len()) };
+ // Half-open range: `end` is one past the last element (start == end
+ // for an empty slice).
+ start..end
+ }
+
+ /// Returns the two unsafe mutable pointers spanning the slice.
+ ///
+ /// The returned range is half-open, which means that the end pointer
+ /// points *one past* the last element of the slice. This way, an empty
+ /// slice is represented by two equal pointers, and the difference between
+ /// the two pointers represents the size of the slice.
+ ///
+ /// See [`as_mut_ptr`] for warnings on using these pointers. The end
+ /// pointer requires extra caution, as it does not point to a valid element
+ /// in the slice.
+ ///
+ /// This function is useful for interacting with foreign interfaces which
+ /// use two pointers to refer to a range of elements in memory, as is
+ /// common in C++.
+ ///
+ /// [`as_mut_ptr`]: #method.as_mut_ptr
+ #[stable(feature = "slice_ptr_range", since = "1.48.0")]
+ #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[inline]
+ pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
+ let start = self.as_mut_ptr();
+ // SAFETY: See as_ptr_range() above for why `add` here is safe.
+ let end = unsafe { start.add(self.len()) };
+ // Half-open range, mirroring `as_ptr_range` but with mutable pointers.
+ start..end
+ }
+
+ /// Swaps two elements in the slice.
+ ///
+ /// # Arguments
+ ///
+ /// * `a` - The index of the first element
+ /// * `b` - The index of the second element
+ ///
+ /// # Panics
+ ///
+ /// Panics if `a` or `b` are out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = ["a", "b", "c", "d"];
+ /// v.swap(1, 3);
+ /// assert!(v == ["a", "d", "c", "b"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn swap(&mut self, a: usize, b: usize) {
+ // Can't take two mutable loans from one vector, so instead just cast
+ // them to their raw pointers to do the swap.
+ let pa: *mut T = &mut self[a];
+ let pb: *mut T = &mut self[b];
+ // SAFETY: `pa` and `pb` have been created from safe mutable references and refer
+ // to elements in the slice and therefore are guaranteed to be valid and aligned.
+ // Note that accessing the elements behind `a` and `b` is checked and will
+ // panic when out of bounds. (`ptr::swap` also handles `a == b`, where the
+ // two pointers alias.)
+ unsafe {
+ ptr::swap(pa, pb);
+ }
+ }
+
+ /// Reverses the order of elements in the slice, in place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [1, 2, 3];
+ /// v.reverse();
+ /// assert!(v == [3, 2, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn reverse(&mut self) {
+ // `i` walks from the front; each step swaps positions `i` and
+ // `ln - i - 1` (or whole chunks thereof), so only `ln / 2` swaps are needed.
+ let mut i: usize = 0;
+ let ln = self.len();
+
+ // For very small types, all the individual reads in the normal
+ // path perform poorly. We can do better, given efficient unaligned
+ // load/store, by loading a larger chunk and reversing a register.
+
+ // Ideally LLVM would do this for us, as it knows better than we do
+ // whether unaligned reads are efficient (since that changes between
+ // different ARM versions, for example) and what the best chunk size
+ // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
+ // the loop, so we need to do this ourselves. (Hypothesis: reverse
+ // is troublesome because the sides can be aligned differently --
+ // will be, when the length is odd -- so there's no way of emitting
+ // pre- and postludes to use fully-aligned SIMD in the middle.)
+
+ // Unaligned multi-byte accesses are cheap on x86/x86_64, so the
+ // chunked fast paths below are only taken there.
+ let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
+
+ if fast_unaligned && mem::size_of::<T>() == 1 {
+ // Use the llvm.bswap intrinsic to reverse u8s in a usize
+ let chunk = mem::size_of::<usize>();
+ while i + chunk - 1 < ln / 2 {
+ // SAFETY: There are several things to check here:
+ //
+ // - Note that `chunk` is either 4 or 8 due to the cfg check
+ // above. So `chunk - 1` is positive.
+ // - Indexing with index `i` is fine as the loop check guarantees
+ // `i + chunk - 1 < ln / 2`
+ // <=> `i < ln / 2 - (chunk - 1) < ln / 2 < ln`.
+ // - Indexing with index `ln - i - chunk = ln - (i + chunk)` is fine:
+ // - `i + chunk > 0` is trivially true.
+ // - The loop check guarantees:
+ // `i + chunk - 1 < ln / 2`
+ // <=> `i + chunk ≤ ln / 2 ≤ ln`, thus subtraction does not underflow.
+ // - The `read_unaligned` and `write_unaligned` calls are fine:
+ // - `pa` points to index `i` where `i < ln / 2 - (chunk - 1)`
+ // (see above) and `pb` points to index `ln - i - chunk`, so
+ // both are at least `chunk`
+ // many bytes away from the end of `self`.
+ // - Any initialized memory is valid `usize`.
+ unsafe {
+ let pa: *mut T = self.get_unchecked_mut(i);
+ let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
+ let va = ptr::read_unaligned(pa as *mut usize);
+ let vb = ptr::read_unaligned(pb as *mut usize);
+ ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
+ ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
+ }
+ i += chunk;
+ }
+ }
+
+ if fast_unaligned && mem::size_of::<T>() == 2 {
+ // Use rotate-by-16 to reverse u16s in a u32
+ let chunk = mem::size_of::<u32>() / 2;
+ while i + chunk - 1 < ln / 2 {
+ // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
+ // (and obviously `i < ln`), because each element is 2 bytes and
+ // we're reading 4.
+ //
+ // `i + chunk - 1 < ln / 2` # while condition
+ // `i + 2 - 1 < ln / 2`
+ // `i + 1 < ln / 2`
+ //
+ // Since it's less than the length divided by 2, then it must be
+ // in bounds.
+ //
+ // This also means that the condition `0 < i + chunk <= ln` is
+ // always respected, ensuring the `pb` pointer can be used
+ // safely.
+ unsafe {
+ let pa: *mut T = self.get_unchecked_mut(i);
+ let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
+ let va = ptr::read_unaligned(pa as *mut u32);
+ let vb = ptr::read_unaligned(pb as *mut u32);
+ ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
+ ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
+ }
+ i += chunk;
+ }
+ }
+
+ // Scalar tail: swaps any elements the chunked paths above did not
+ // cover (or everything, on targets without the fast paths).
+ while i < ln / 2 {
+ // SAFETY: `i` is inferior to half the length of the slice so
+ // accessing `i` and `ln - i - 1` is safe (`i` starts at 0 and
+ // will not go further than `ln / 2 - 1`).
+ // The resulting pointers `pa` and `pb` are therefore valid and
+ // aligned, and can be read from and written to.
+ unsafe {
+ // Unsafe swap to avoid the bounds check in safe swap.
+ let pa: *mut T = self.get_unchecked_mut(i);
+ let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
+ ptr::swap(pa, pb);
+ }
+ i += 1;
+ }
+ }
+
+ /// Returns an iterator over the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &[1, 2, 4];
+ /// let mut iterator = x.iter();
+ ///
+ /// assert_eq!(iterator.next(), Some(&1));
+ /// assert_eq!(iterator.next(), Some(&2));
+ /// assert_eq!(iterator.next(), Some(&4));
+ /// assert_eq!(iterator.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn iter(&self) -> Iter<'_, T> {
+ // `Iter` borrows the slice for `'_` and yields `&T` items in order.
+ Iter::new(self)
+ }
+
+ /// Returns an iterator that allows modifying each value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = &mut [1, 2, 4];
+ /// for elem in x.iter_mut() {
+ /// *elem += 2;
+ /// }
+ /// assert_eq!(x, &[3, 4, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ // Mutable counterpart of `iter`: yields `&mut T` items in order.
+ IterMut::new(self)
+ }
+
+ /// Returns an iterator over all contiguous windows of length
+ /// `size`. The windows overlap. If the slice is shorter than
+ /// `size`, the iterator returns no values.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['r', 'u', 's', 't'];
+ /// let mut iter = slice.windows(2);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'u']);
+ /// assert_eq!(iter.next().unwrap(), &['u', 's']);
+ /// assert_eq!(iter.next().unwrap(), &['s', 't']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the slice is shorter than `size`:
+ ///
+ /// ```
+ /// let slice = ['f', 'o', 'o'];
+ /// let mut iter = slice.windows(4);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn windows(&self, size: usize) -> Windows<'_, T> {
+ // `NonZeroUsize` encodes the non-zero invariant in the type;
+ // `new` returns `None` for 0, and `expect` raises the documented panic.
+ let size = NonZeroUsize::new(size).expect("size is zero");
+ Windows::new(self, size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`chunks_exact`] for a variant of this iterator that returns chunks of always exactly
+ /// `chunk_size` elements, and [`rchunks`] for the same iterator but starting at the end of the
+ /// slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks(2);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert_eq!(iter.next().unwrap(), &['m']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`chunks_exact`]: #method.chunks_exact
+ /// [`rchunks`]: #method.rchunks
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
+ // A chunk size of zero could never make progress; reject it up front.
+ assert_ne!(chunk_size, 0);
+ Chunks::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`chunks_exact_mut`] for a variant of this iterator that returns chunks of always
+ /// exactly `chunk_size` elements, and [`rchunks_mut`] for the same iterator but starting at
+ /// the end of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.chunks_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 3]);
+ /// ```
+ ///
+ /// [`chunks_exact_mut`]: #method.chunks_exact_mut
+ /// [`rchunks_mut`]: #method.rchunks_mut
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
+ // Zero-sized chunks could never make progress; reject up front.
+ assert_ne!(chunk_size, 0);
+ ChunksMut::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
+ /// from the `remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks`].
+ ///
+ /// See [`chunks`] for a variant of this iterator that also returns the remainder as a smaller
+ /// chunk, and [`rchunks_exact`] for the same iterator but starting at the end of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.chunks_exact(2);
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['m']);
+ /// ```
+ ///
+ /// [`chunks`]: #method.chunks
+ /// [`rchunks_exact`]: #method.rchunks_exact
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ #[inline]
+ pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
+ // Reject zero-sized chunks up front, as documented under Panics.
+ assert_ne!(chunk_size, 0);
+ ChunksExact::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks_mut`].
+ ///
+ /// See [`chunks_mut`] for a variant of this iterator that also returns the remainder as a
+ /// smaller chunk, and [`rchunks_exact_mut`] for the same iterator but starting at the end of
+ /// the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.chunks_exact_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 0]);
+ /// ```
+ ///
+ /// [`chunks_mut`]: #method.chunks_mut
+ /// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
+ #[stable(feature = "chunks_exact", since = "1.31.0")]
+ #[inline]
+ pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
+ // Reject zero-sized chunks up front, as documented under Panics.
+ assert_ne!(chunk_size, 0);
+ ChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the beginning of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a compile time
+ /// error before this method gets stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let (chunks, remainder) = slice.as_chunks();
+ /// assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
+ /// assert_eq!(remainder, &['m']);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
+ assert_ne!(N, 0);
+ // Number of complete `N`-element chunks; the first `len * N` elements
+ // are reinterpreted as arrays, the rest become the remainder.
+ let len = self.len() / N;
+ let (multiple_of_n, remainder) = self.split_at(len * N);
+ // SAFETY: We cast a slice of `len * N` elements into
+ // a slice of `len` many `N` elements chunks.
+ let array_slice: &[[T; N]] = unsafe { from_raw_parts(multiple_of_n.as_ptr().cast(), len) };
+ (array_slice, remainder)
+ }
+
+ /// Returns an iterator over `N` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are array references and do not overlap. If `N` does not divide the
+ /// length of the slice, then the last up to `N-1` elements will be omitted and can be
+ /// retrieved from the `remainder` function of the iterator.
+ ///
+ /// This method is the const generic equivalent of [`chunks_exact`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a compile time
+ /// error before this method gets stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_chunks)]
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.array_chunks();
+ /// assert_eq!(iter.next().unwrap(), &['l', 'o']);
+ /// assert_eq!(iter.next().unwrap(), &['r', 'e']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['m']);
+ /// ```
+ ///
+ /// [`chunks_exact`]: #method.chunks_exact
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ #[inline]
+ pub fn array_chunks<const N: usize>(&self) -> ArrayChunks<'_, T, N> {
+ // `N` is a const parameter, so this is checked once per monomorphization.
+ assert_ne!(N, 0);
+ ArrayChunks::new(self)
+ }
+
+ /// Splits the slice into a slice of `N`-element arrays,
+ /// starting at the beginning of the slice,
+ /// and a remainder slice with length strictly less than `N`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a compile time
+ /// error before this method gets stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_as_chunks)]
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// let (chunks, remainder) = v.as_chunks_mut();
+ /// remainder[0] = 9;
+ /// for chunk in chunks {
+ /// *chunk = [count; 2];
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 9]);
+ /// ```
+ #[unstable(feature = "slice_as_chunks", issue = "74985")]
+ #[inline]
+ pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
+ assert_ne!(N, 0);
+ // Number of complete `N`-element chunks; the split keeps the two
+ // returned borrows disjoint.
+ let len = self.len() / N;
+ let (multiple_of_n, remainder) = self.split_at_mut(len * N);
+ let array_slice: &mut [[T; N]] =
+ // SAFETY: We cast a slice of `len * N` elements into
+ // a slice of `len` many `N` elements chunks.
+ unsafe { from_raw_parts_mut(multiple_of_n.as_mut_ptr().cast(), len) };
+ (array_slice, remainder)
+ }
+
+ /// Returns an iterator over `N` elements of the slice at a time, starting at the
+ /// beginning of the slice.
+ ///
+ /// The chunks are mutable array references and do not overlap. If `N` does not divide
+ /// the length of the slice, then the last up to `N-1` elements will be omitted and
+ /// can be retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// This method is the const generic equivalent of [`chunks_exact_mut`].
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a compile time
+ /// error before this method gets stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_chunks)]
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.array_chunks_mut() {
+ /// *chunk = [count; 2];
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[1, 1, 2, 2, 0]);
+ /// ```
+ ///
+ /// [`chunks_exact_mut`]: #method.chunks_exact_mut
+ #[unstable(feature = "array_chunks", issue = "74985")]
+ #[inline]
+ pub fn array_chunks_mut<const N: usize>(&mut self) -> ArrayChunksMut<'_, T, N> {
+ // `N` is a const parameter, so this is checked once per monomorphization.
+ assert_ne!(N, 0);
+ ArrayChunksMut::new(self)
+ }
+
+ /// Returns an iterator over overlapping windows of `N` elements of a slice,
+ /// starting at the beginning of the slice.
+ ///
+ /// This is the const generic equivalent of [`windows`].
+ ///
+ /// If `N` is greater than the size of the slice, it will return no windows.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `N` is 0. This check will most probably get changed to a compile time
+ /// error before this method gets stabilized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(array_windows)]
+ /// let slice = [0, 1, 2, 3];
+ /// let mut iter = slice.array_windows();
+ /// assert_eq!(iter.next().unwrap(), &[0, 1]);
+ /// assert_eq!(iter.next().unwrap(), &[1, 2]);
+ /// assert_eq!(iter.next().unwrap(), &[2, 3]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`windows`]: #method.windows
+ #[unstable(feature = "array_windows", issue = "75027")]
+ #[inline]
+ pub fn array_windows<const N: usize>(&self) -> ArrayWindows<'_, T, N> {
+ // Zero-length windows are rejected, matching `windows`.
+ assert_ne!(N, 0);
+ ArrayWindows::new(self)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`rchunks_exact`] for a variant of this iterator that returns chunks of always exactly
+ /// `chunk_size` elements, and [`chunks`] for the same iterator but starting at the beginning
+ /// of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks(2);
+ /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
+ /// assert_eq!(iter.next().unwrap(), &['l']);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// [`rchunks_exact`]: #method.rchunks_exact
+ /// [`chunks`]: #method.chunks
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> {
+ // Reject zero-sized chunks up front, consistently with the `chunks`
+ // family (which uses `assert_ne!` for a more informative panic message).
+ assert_ne!(chunk_size, 0);
+ RChunks::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last chunk will not have length `chunk_size`.
+ ///
+ /// See [`rchunks_exact_mut`] for a variant of this iterator that returns chunks of always
+ /// exactly `chunk_size` elements, and [`chunks_mut`] for the same iterator but starting at the
+ /// beginning of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.rchunks_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[3, 2, 2, 1, 1]);
+ /// ```
+ ///
+ /// [`rchunks_exact_mut`]: #method.rchunks_exact_mut
+ /// [`chunks_mut`]: #method.chunks_mut
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> {
+ // Reject zero-sized chunks up front, consistently with the `chunks`
+ // family (which uses `assert_ne!` for a more informative panic message).
+ assert_ne!(chunk_size, 0);
+ RChunksMut::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the
+ /// end of the slice.
+ ///
+ /// The chunks are slices and do not overlap. If `chunk_size` does not divide the length of the
+ /// slice, then the last up to `chunk_size-1` elements will be omitted and can be retrieved
+ /// from the `remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks`].
+ ///
+ /// See [`rchunks`] for a variant of this iterator that also returns the remainder as a smaller
+ /// chunk, and [`chunks_exact`] for the same iterator but starting at the beginning of the
+ /// slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = ['l', 'o', 'r', 'e', 'm'];
+ /// let mut iter = slice.rchunks_exact(2);
+ /// assert_eq!(iter.next().unwrap(), &['e', 'm']);
+ /// assert_eq!(iter.next().unwrap(), &['o', 'r']);
+ /// assert!(iter.next().is_none());
+ /// assert_eq!(iter.remainder(), &['l']);
+ /// ```
+ ///
+ /// [`chunks`]: #method.chunks
+ /// [`rchunks`]: #method.rchunks
+ /// [`chunks_exact`]: #method.chunks_exact
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> {
+ // Reject zero-sized chunks up front, consistently with the `chunks`
+ // family (which uses `assert_ne!` for a more informative panic message).
+ assert_ne!(chunk_size, 0);
+ RChunksExact::new(self, chunk_size)
+ }
+
+ /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end
+ /// of the slice.
+ ///
+ /// The chunks are mutable slices, and do not overlap. If `chunk_size` does not divide the
+ /// length of the slice, then the last up to `chunk_size-1` elements will be omitted and can be
+ /// retrieved from the `into_remainder` function of the iterator.
+ ///
+ /// Due to each chunk having exactly `chunk_size` elements, the compiler can often optimize the
+ /// resulting code better than in the case of [`chunks_mut`].
+ ///
+ /// See [`rchunks_mut`] for a variant of this iterator that also returns the remainder as a
+ /// smaller chunk, and [`chunks_exact_mut`] for the same iterator but starting at the beginning
+ /// of the slice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `chunk_size` is 0.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = &mut [0, 0, 0, 0, 0];
+ /// let mut count = 1;
+ ///
+ /// for chunk in v.rchunks_exact_mut(2) {
+ /// for elem in chunk.iter_mut() {
+ /// *elem += count;
+ /// }
+ /// count += 1;
+ /// }
+ /// assert_eq!(v, &[0, 2, 2, 1, 1]);
+ /// ```
+ ///
+ /// [`chunks_mut`]: #method.chunks_mut
+ /// [`rchunks_mut`]: #method.rchunks_mut
+ /// [`chunks_exact_mut`]: #method.chunks_exact_mut
+ #[stable(feature = "rchunks", since = "1.31.0")]
+ #[inline]
+ pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> {
+ // Reject zero-sized chunks up front, consistently with the `chunks`
+ // family (which uses `assert_ne!` for a more informative panic message).
+ assert_ne!(chunk_size, 0);
+ RChunksExactMut::new(self, chunk_size)
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// {
+ /// let (left, right) = v.split_at(0);
+ /// assert_eq!(left, []);
+ /// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_at(2);
+ /// assert_eq!(left, [1, 2]);
+ /// assert_eq!(right, [3, 4, 5, 6]);
+ /// }
+ ///
+ /// {
+ /// let (left, right) = v.split_at(6);
+ /// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, []);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
+ assert!(mid <= self.len());
+ // SAFETY: the assert above guarantees `0 <= mid <= self.len()`, which
+ // is exactly the contract required by `split_at_unchecked`.
+ unsafe { self.split_at_unchecked(mid) }
+ }
+
+ /// Divides one mutable slice into two at an index.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// // scoped to restrict the lifetime of the borrows
+ /// {
+ /// let (left, right) = v.split_at_mut(2);
+ /// assert_eq!(left, [1, 0]);
+ /// assert_eq!(right, [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// }
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ assert!(mid <= self.len());
+ // SAFETY: the `assert!` above established `0 <= mid <= self.len()`, which is
+ // exactly the contract `split_at_mut_unchecked` documents for its caller.
+ unsafe { self.split_at_mut_unchecked(mid) }
+ }
+
+ /// Divides one slice into two at an index, without doing bounds checking.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// For a safe alternative see [`split_at`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used. The caller has to ensure that
+ /// `0 <= mid <= self.len()`.
+ ///
+ /// [`split_at`]: #method.split_at
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```compile_fail
+ /// #![feature(slice_split_at_unchecked)]
+ ///
+ /// let v = [1, 2, 3, 4, 5, 6];
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(0);
+ /// assert_eq!(left, []);
+ /// assert_eq!(right, [1, 2, 3, 4, 5, 6]);
+ /// }
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(2);
+ /// assert_eq!(left, [1, 2]);
+ /// assert_eq!(right, [3, 4, 5, 6]);
+ /// }
+ ///
+ /// unsafe {
+ /// let (left, right) = v.split_at_unchecked(6);
+ /// assert_eq!(left, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(right, []);
+ /// }
+ /// ```
+ // NOTE(review): this fn is not `pub`, which is presumably why the examples are
+ // marked `compile_fail` — confirm before exposing `slice_split_at_unchecked`.
+ #[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[inline]
+ unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
+ // SAFETY: the caller guarantees `0 <= mid <= self.len()`, so both `..mid`
+ // and `mid..` are in-bounds ranges as `get_unchecked` requires.
+ unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
+ }
+
+ /// Divides one mutable slice into two at an index, without doing bounds checking.
+ ///
+ /// The first will contain all indices from `[0, mid)` (excluding
+ /// the index `mid` itself) and the second will contain all
+ /// indices from `[mid, len)` (excluding the index `len` itself).
+ ///
+ /// For a safe alternative see [`split_at_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with an out-of-bounds index is *[undefined behavior]*
+ /// even if the resulting reference is not used. The caller has to ensure that
+ /// `0 <= mid <= self.len()`.
+ ///
+ /// [`split_at_mut`]: #method.split_at_mut
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```compile_fail
+ /// #![feature(slice_split_at_unchecked)]
+ ///
+ /// let mut v = [1, 0, 3, 0, 5, 6];
+ /// // scoped to restrict the lifetime of the borrows
+ /// unsafe {
+ /// let (left, right) = v.split_at_mut_unchecked(2);
+ /// assert_eq!(left, [1, 0]);
+ /// assert_eq!(right, [3, 0, 5, 6]);
+ /// left[1] = 2;
+ /// right[1] = 4;
+ /// }
+ /// assert_eq!(v, [1, 2, 3, 4, 5, 6]);
+ /// ```
+ #[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
+ #[inline]
+ unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
+ // Capture the raw parts once; both halves are rebuilt from this single borrow.
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+
+ // SAFETY: the caller guarantees `0 <= mid <= self.len()`, so `ptr` is valid
+ // for `mid` elements and `ptr.add(mid)` is valid for `len - mid` elements.
+ //
+ // The ranges `[ptr, ptr + mid)` and `[ptr + mid, ptr + len)` do not overlap,
+ // so handing out two exclusive mutable slices via `from_raw_parts_mut` is sound.
+ unsafe { (from_raw_parts_mut(ptr, mid), from_raw_parts_mut(ptr.add(mid), len - mid)) }
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = [10, 40, 33, 20];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the first element is matched, an empty slice will be the first item
+ /// returned by the iterator. Similarly, if the last element in the slice
+ /// is matched, an empty slice will be the last item returned by the
+ /// iterator:
+ ///
+ /// ```
+ /// let slice = [10, 40, 33];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40]);
+ /// assert_eq!(iter.next().unwrap(), &[]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If two matched elements are directly adjacent, an empty slice will be
+ /// present between them:
+ ///
+ /// ```
+ /// let slice = [10, 6, 33, 20];
+ /// let mut iter = slice.split(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10]);
+ /// assert_eq!(iter.next().unwrap(), &[]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split<F>(&self, pred: F) -> Split<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `Split` implements the iteration documented above.
+ Split::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.split_mut(|num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(v, [1, 40, 30, 1, 60, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_mut<F>(&mut self, pred: F) -> SplitMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `SplitMut` implements the iteration documented above.
+ SplitMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`. The matched element is contained in the end of the previous
+ /// subslice as a terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_inclusive)]
+ /// let slice = [10, 40, 33, 20];
+ /// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+ /// assert_eq!(iter.next().unwrap(), &[20]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ ///
+ /// If the last element of the slice is matched,
+ /// that element will be considered the terminator of the preceding slice.
+ /// That slice will be the last item returned by the iterator.
+ ///
+ /// ```
+ /// #![feature(split_inclusive)]
+ /// let slice = [3, 10, 40, 33];
+ /// let mut iter = slice.split_inclusive(|num| num % 3 == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[3]);
+ /// assert_eq!(iter.next().unwrap(), &[10, 40, 33]);
+ /// assert!(iter.next().is_none());
+ /// ```
+ #[unstable(feature = "split_inclusive", issue = "72360")]
+ #[inline]
+ pub fn split_inclusive<F>(&self, pred: F) -> SplitInclusive<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `SplitInclusive` implements the iteration documented above.
+ SplitInclusive::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`. The matched element is contained in the previous
+ /// subslice as a terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_inclusive)]
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.split_inclusive_mut(|num| *num % 3 == 0) {
+ /// let terminator_idx = group.len()-1;
+ /// group[terminator_idx] = 1;
+ /// }
+ /// assert_eq!(v, [10, 40, 1, 20, 1, 1]);
+ /// ```
+ #[unstable(feature = "split_inclusive", issue = "72360")]
+ #[inline]
+ pub fn split_inclusive_mut<F>(&mut self, pred: F) -> SplitInclusiveMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `SplitInclusiveMut` implements the iteration documented above.
+ SplitInclusiveMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, starting at the end of the slice and working backwards.
+ /// The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let slice = [11, 22, 33, 0, 44, 55];
+ /// let mut iter = slice.rsplit(|num| *num == 0);
+ ///
+ /// assert_eq!(iter.next().unwrap(), &[44, 55]);
+ /// assert_eq!(iter.next().unwrap(), &[11, 22, 33]);
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ ///
+ /// As with `split()`, if the first or last element is matched, an empty
+ /// slice will be the first (or last) item returned by the iterator.
+ ///
+ /// ```
+ /// let v = &[0, 1, 1, 2, 3, 5, 8];
+ /// let mut it = v.rsplit(|n| *n % 2 == 0);
+ /// assert_eq!(it.next().unwrap(), &[]);
+ /// assert_eq!(it.next().unwrap(), &[3, 5]);
+ /// assert_eq!(it.next().unwrap(), &[1, 1]);
+ /// assert_eq!(it.next().unwrap(), &[]);
+ /// assert_eq!(it.next(), None);
+ /// ```
+ #[stable(feature = "slice_rsplit", since = "1.27.0")]
+ #[inline]
+ pub fn rsplit<F>(&self, pred: F) -> RSplit<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `RSplit` implements the reverse iteration documented above.
+ RSplit::new(self, pred)
+ }
+
+ /// Returns an iterator over mutable subslices separated by elements that
+ /// match `pred`, starting at the end of the slice and working
+ /// backwards. The matched element is not contained in the subslices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [100, 400, 300, 200, 600, 500];
+ ///
+ /// let mut count = 0;
+ /// for group in v.rsplit_mut(|num| *num % 3 == 0) {
+ /// count += 1;
+ /// group[0] = count;
+ /// }
+ /// assert_eq!(v, [3, 400, 300, 2, 600, 1]);
+ /// ```
+ #[stable(feature = "slice_rsplit", since = "1.27.0")]
+ #[inline]
+ pub fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // Delegation only: `RSplitMut` implements the reverse iteration documented above.
+ RSplitMut::new(self, pred)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, limited to returning at most `n` items. The matched element is
+ /// not contained in the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// Print the slice split once by numbers divisible by 3 (i.e., `[10, 40]`,
+ /// `[20, 60, 50]`):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.splitn(2, |num| *num % 3 == 0) {
+ /// println!("{:?}", group);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn<F>(&self, n: usize, pred: F) -> SplitN<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // `SplitN` is just the `split` iterator capped at `n` items.
+ SplitN::new(self.split(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred`, limited to returning at most `n` items. The matched element is
+ /// not contained in the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.splitn_mut(2, |num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(v, [1, 40, 30, 1, 60, 50]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn_mut<F>(&mut self, n: usize, pred: F) -> SplitNMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // `SplitNMut` is just the `split_mut` iterator capped at `n` items.
+ SplitNMut::new(self.split_mut(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred` limited to returning at most `n` items. This starts at the end of
+ /// the slice and works backwards. The matched element is not contained in
+ /// the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// Print the slice split once, starting from the end, by numbers divisible
+ /// by 3 (i.e., `[50]`, `[10, 40, 30, 20]`):
+ ///
+ /// ```
+ /// let v = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in v.rsplitn(2, |num| *num % 3 == 0) {
+ /// println!("{:?}", group);
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn<F>(&self, n: usize, pred: F) -> RSplitN<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // `RSplitN` is just the `rsplit` iterator capped at `n` items.
+ RSplitN::new(self.rsplit(pred), n)
+ }
+
+ /// Returns an iterator over subslices separated by elements that match
+ /// `pred` limited to returning at most `n` items. This starts at the end of
+ /// the slice and works backwards. The matched element is not contained in
+ /// the subslices.
+ ///
+ /// The last element returned, if any, will contain the remainder of the
+ /// slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = [10, 40, 30, 20, 60, 50];
+ ///
+ /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) {
+ /// group[0] = 1;
+ /// }
+ /// assert_eq!(s, [1, 40, 30, 20, 60, 1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn_mut<F>(&mut self, n: usize, pred: F) -> RSplitNMut<'_, T, F>
+ where
+ F: FnMut(&T) -> bool,
+ {
+ // `RSplitNMut` is just the `rsplit_mut` iterator capped at `n` items.
+ RSplitNMut::new(self.rsplit_mut(pred), n)
+ }
+
+ /// Returns `true` if the slice contains an element with the given value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.contains(&30));
+ /// assert!(!v.contains(&50));
+ /// ```
+ ///
+ /// If you do not have an `&T`, but just an `&U` such that `T: Borrow<U>`
+ /// (e.g. `String: Borrow<str>`), you can use `iter().any`:
+ ///
+ /// ```
+ /// let v = [String::from("hello"), String::from("world")]; // slice of `String`
+ /// assert!(v.iter().any(|e| e == "hello")); // search with `&str`
+ /// assert!(!v.iter().any(|e| e == "hi"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq,
+ {
+ // Dispatch through the internal `SliceContains` trait — presumably to allow
+ // per-element-type specialization; note the (needle, haystack) argument order.
+ cmp::SliceContains::slice_contains(x, self)
+ }
+
+ /// Returns `true` if `needle` is a prefix of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.starts_with(&[10]));
+ /// assert!(v.starts_with(&[10, 40]));
+ /// assert!(!v.starts_with(&[50]));
+ /// assert!(!v.starts_with(&[10, 50]));
+ /// ```
+ ///
+ /// Always returns `true` if `needle` is an empty slice:
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert!(v.starts_with(&[]));
+ /// let v: &[u8] = &[];
+ /// assert!(v.starts_with(&[]));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn starts_with(&self, needle: &[T]) -> bool
+ where
+ T: PartialEq,
+ {
+ // `get` returns the leading `needle.len()` elements, or `None` when the
+ // slice is too short to hold `needle` at all.
+ match self.get(..needle.len()) {
+ Some(head) => needle == head,
+ None => false,
+ }
+ }
+
+ /// Returns `true` if `needle` is a suffix of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = [10, 40, 30];
+ /// assert!(v.ends_with(&[30]));
+ /// assert!(v.ends_with(&[40, 30]));
+ /// assert!(!v.ends_with(&[50]));
+ /// assert!(!v.ends_with(&[50, 30]));
+ /// ```
+ ///
+ /// Always returns `true` if `needle` is an empty slice:
+ ///
+ /// ```
+ /// let v = &[10, 40, 30];
+ /// assert!(v.ends_with(&[]));
+ /// let v: &[u8] = &[];
+ /// assert!(v.ends_with(&[]));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ends_with(&self, needle: &[T]) -> bool
+ where
+ T: PartialEq,
+ {
+ // A suffix match is only possible when the slice is at least as long as
+ // `needle`; `checked_sub` encodes exactly that length test.
+ match self.len().checked_sub(needle.len()) {
+ Some(start) => needle == &self[start..],
+ None => false,
+ }
+ }
+
+ /// Returns a subslice with the prefix removed.
+ ///
+ /// If the slice starts with `prefix`, returns the subslice after the prefix, wrapped in `Some`.
+ /// If `prefix` is empty, simply returns the original slice.
+ ///
+ /// If the slice does not start with `prefix`, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_strip)]
+ /// let v = &[10, 40, 30];
+ /// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+ /// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+ /// assert_eq!(v.strip_prefix(&[50]), None);
+ /// assert_eq!(v.strip_prefix(&[10, 50]), None);
+ /// ```
+ #[must_use = "returns the subslice without modifying the original"]
+ #[unstable(feature = "slice_strip", issue = "73413")]
+ pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
+ where
+ T: PartialEq,
+ {
+ let n = prefix.len();
+ match self.get(..n) {
+ // The head matched, so `n <= self.len()` and the tail lookup below is
+ // guaranteed to be `Some`.
+ Some(head) if head == prefix => self.get(n..),
+ _ => None,
+ }
+ }
+
+ /// Returns a subslice with the suffix removed.
+ ///
+ /// If the slice ends with `suffix`, returns the subslice before the suffix, wrapped in `Some`.
+ /// If `suffix` is empty, simply returns the original slice.
+ ///
+ /// If the slice does not end with `suffix`, returns `None`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_strip)]
+ /// let v = &[10, 40, 30];
+ /// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+ /// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+ /// assert_eq!(v.strip_suffix(&[50]), None);
+ /// assert_eq!(v.strip_suffix(&[50, 30]), None);
+ /// ```
+ #[must_use = "returns the subslice without modifying the original"]
+ #[unstable(feature = "slice_strip", issue = "73413")]
+ pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
+ where
+ T: PartialEq,
+ {
+ // `checked_sub` is `None` exactly when `suffix` is longer than the slice;
+ // otherwise `split` is the index where a matching suffix would begin.
+ match self.len().checked_sub(suffix.len()) {
+ Some(split) if &self[split..] == suffix => Some(&self[..split]),
+ _ => None,
+ }
+ }
+
+ /// Binary searches this sorted slice for a given element.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ ///
+ /// assert_eq!(s.binary_search(&13), Ok(9));
+ /// assert_eq!(s.binary_search(&4), Err(7));
+ /// assert_eq!(s.binary_search(&100), Err(13));
+ /// let r = s.binary_search(&1);
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ ///
+ /// If you want to insert an item to a sorted vector, while maintaining
+ /// sort order:
+ ///
+ /// ```
+ /// let mut s = vec![0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ /// let num = 42;
+ /// let idx = s.binary_search(&num).unwrap_or_else(|x| x);
+ /// s.insert(idx, num);
+ /// assert_eq!(s, [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn binary_search(&self, x: &T) -> Result<usize, usize>
+ where
+ T: Ord,
+ {
+ // Delegate to `binary_search_by`, comparing each probe against `x` via `Ord`.
+ self.binary_search_by(|p| p.cmp(x))
+ }
+
+ /// Binary searches this sorted slice with a comparator function.
+ ///
+ /// The comparator function should implement an order consistent
+ /// with the sort order of the underlying slice, returning an
+ /// order code that indicates whether its argument is `Less`,
+ /// `Equal` or `Greater` the desired target.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
+ ///
+ /// let seek = 13;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9));
+ /// let seek = 4;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7));
+ /// let seek = 100;
+ /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13));
+ /// let seek = 1;
+ /// let r = s.binary_search_by(|probe| probe.cmp(&seek));
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> Ordering,
+ {
+ let s = self;
+ let mut size = s.len();
+ if size == 0 {
+ return Err(0);
+ }
+ let mut base = 0usize;
+ // Loop invariant: any match lies in `base..base + size`, and
+ // `base + size <= s.len()` holds on every iteration.
+ while size > 1 {
+ let half = size / 2;
+ let mid = base + half;
+ // SAFETY: the call is made safe by the following invariants:
+ // - `mid >= 0`: by definition
+ // - `mid < s.len()`: `mid = base + size / 2 < base + size <= s.len()`
+ let cmp = f(unsafe { s.get_unchecked(mid) });
+ // Keep the half of the window that can still contain the target.
+ base = if cmp == Greater { base } else { mid };
+ size -= half;
+ }
+ // SAFETY: `size == 1` here, so `base < base + size <= s.len()` keeps
+ // `base` in bounds.
+ let cmp = f(unsafe { s.get_unchecked(base) });
+ if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) }
+ }
+
+ /// Binary searches this sorted slice with a key extraction function.
+ ///
+ /// Assumes that the slice is sorted by the key, for instance with
+ /// [`sort_by_key`] using the same key extraction function.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// [`sort_by_key`]: #method.sort_by_key
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements in a slice of pairs sorted by
+ /// their second elements. The first is found, with a uniquely
+ /// determined position; the second and third are not found; the
+ /// fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
+ /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ /// (1, 21), (2, 34), (4, 55)];
+ ///
+ /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9));
+ /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7));
+ /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13));
+ /// let r = s.binary_search_by_key(&1, |&(a,b)| b);
+ /// assert!(match r { Ok(1..=4) => true, _ => false, });
+ /// ```
+ #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")]
+ #[inline]
+ pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> B,
+ B: Ord,
+ {
+ // Extract the probe's key on the fly and compare it against `b`.
+ self.binary_search_by(|k| f(k).cmp(b))
+ }
+
+ /// Sorts the slice, but may not preserve the order of equal elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.sort_unstable();
+ /// assert!(v == [-5, -3, 1, 2, 4]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ // `Ord::lt` supplies the boolean "is less than" predicate for the quicksort.
+ sort::quicksort(self, |a, b| a.lt(b));
+ }
+
+ /// Sorts the slice with a comparator function, but may not preserve the order of equal
+ /// elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// It is typically faster than stable sorting, except in a few special cases, e.g., when the
+ /// slice consists of several concatenated sorted sequences.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.sort_unstable_by(|a, b| a.cmp(b));
+ /// assert!(v == [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.sort_unstable_by(|a, b| b.cmp(a));
+ /// assert!(v == [5, 4, 3, 2, 1]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable_by<F>(&mut self, mut compare: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ // Reduce the three-way comparator to the strict "is less than" test the
+ // quicksort consumes.
+ sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less);
+ }
+
+ /// Sorts the slice with a key extraction function, but may not preserve the order of equal
+ /// elements.
+ ///
+ /// This sort is unstable (i.e., may reorder equal elements), in-place
+ /// (i.e., does not allocate), and *O*(m \* *n* \* log(*n*)) worst-case, where the key function is
+ /// *O*(*m*).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key)
+ /// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in
+ /// cases where the key function is expensive.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.sort_unstable_by_key(|k| k.abs());
+ /// assert!(v == [1, 2, -3, 4, -5]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[stable(feature = "sort_unstable", since = "1.20.0")]
+ #[inline]
+ pub fn sort_unstable_by_key<K, F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ // `f` runs twice per comparison (once per operand) — this is the "key calling
+ // strategy" the docs above warn about; prefer `sort_by_cached_key` for
+ // expensive key functions.
+ sort::quicksort(self, |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Reorder the slice such that the element at `index` is at its final sorted position.
+ ///
+ /// Deprecated forwarding wrapper; see
+ /// [`select_nth_unstable`](#method.select_nth_unstable) for the full contract,
+ /// panics, and examples.
+ #[unstable(feature = "slice_partition_at_index", issue = "55300")]
+ #[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable() instead")]
+ #[inline]
+ pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
+ where
+ T: Ord,
+ {
+ self.select_nth_unstable(index)
+ }
+
+ /// Reorder the slice with a comparator function such that the element at `index` is at its
+ /// final sorted position.
+ ///
+ /// Deprecated forwarding wrapper; see
+ /// [`select_nth_unstable_by`](#method.select_nth_unstable_by) for the full contract,
+ /// panics, and examples.
+ #[unstable(feature = "slice_partition_at_index", issue = "55300")]
+ #[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by() instead")]
+ #[inline]
+ pub fn partition_at_index_by<F>(
+ &mut self,
+ index: usize,
+ compare: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ self.select_nth_unstable_by(index, compare)
+ }
+
+ /// Reorder the slice with a key extraction function such that the element at `index` is at its
+ /// final sorted position.
+ ///
+ /// Deprecated forwarding wrapper; see
+ /// [`select_nth_unstable_by_key`](#method.select_nth_unstable_by_key) for the full
+ /// contract, panics, and examples.
+ #[unstable(feature = "slice_partition_at_index", issue = "55300")]
+ #[rustc_deprecated(since = "1.49.0", reason = "use select_nth_unstable_by_key() instead")]
+ #[inline]
+ pub fn partition_at_index_by_key<K, F>(
+ &mut self,
+ index: usize,
+ f: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ self.select_nth_unstable_by_key(index, f)
+ }
+
+ /// Reorder the slice such that the element at `index` is at its final sorted position.
+ ///
+ /// This reordering has the additional property that any value at position `i < index` will be
+ /// less than or equal to any value at a position `j > index`. Additionally, this reordering is
+ /// unstable (i.e. any number of equal elements may end up at position `index`), in-place
+ /// (i.e. does not allocate), and *O*(*n*) worst-case. This function is also known as "kth
+ /// element" in other libraries. It returns a triplet of the following values: all elements less
+ /// than the one at the given index, the value at the given index, and all elements greater than
+ /// the one at the given index.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: #method.sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Find the median
+ /// v.select_nth_unstable(2);
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [-3, -5, 1, 2, 4] ||
+ /// v == [-5, -3, 1, 2, 4] ||
+ /// v == [-3, -5, 1, 4, 2] ||
+ /// v == [-5, -3, 1, 4, 2]);
+ /// ```
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T])
+ where
+ T: Ord,
+ {
+ // `Ord::lt` supplies the boolean "is less" predicate for the quickselect.
+ let mut f = |a: &T, b: &T| a.lt(b);
+ sort::partition_at_index(self, index, &mut f)
+ }
+
+ /// Reorder the slice with a comparator function such that the element at `index` is at its
+ /// final sorted position.
+ ///
+ /// This reordering has the additional property that any value at position `i < index` will be
+ /// less than or equal to any value at a position `j > index` using the comparator function.
+ /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at
+ /// position `index`), in-place (i.e. does not allocate), and *O*(*n*) worst-case. This function
+ /// is also known as "kth element" in other libraries. It returns a triplet of the following
+ /// values: all elements less than the one at the given index, the value at the given index,
+ /// and all elements greater than the one at the given index, using the provided comparator
+ /// function.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: #method.sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Find the median as if the slice were sorted in descending order.
+ /// v.select_nth_unstable_by(2, |a, b| b.cmp(a));
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [2, 4, 1, -5, -3] ||
+ /// v == [2, 4, 1, -3, -5] ||
+ /// v == [4, 2, 1, -5, -3] ||
+ /// v == [4, 2, 1, -3, -5]);
+ /// ```
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable_by<F>(
+ &mut self,
+ index: usize,
+ mut compare: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ // Adapt the `Ordering` comparator to the boolean "is less" predicate the
+ // internal partitioning routine consumes.
+ let mut f = |a: &T, b: &T| compare(a, b) == Less;
+ sort::partition_at_index(self, index, &mut f)
+ }
+
+ /// Reorders the slice such that the element at `index` lands in its final position as if the
+ /// slice were sorted by the keys produced by the extraction function `f`.
+ ///
+ /// On return, the key of every element before `index` is less than or equal to the key of
+ /// every element after `index`. Equal-keyed elements may appear at `index` in any order, the
+ /// reordering happens in place (no allocation), and the worst-case running time is *O*(*n*).
+ /// Other libraries call this operation "kth element". The return value is a triplet: the
+ /// elements ordered before `index`, the element at `index` itself, and the elements ordered
+ /// after `index`, all under the provided key function.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on the quickselect portion of the same quicksort algorithm
+ /// used for [`sort_unstable`].
+ ///
+ /// [`sort_unstable`]: #method.sort_unstable
+ ///
+ /// # Panics
+ ///
+ /// Panics when `index >= len()`, meaning it always panics on empty slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// // Return the median as if the array were sorted according to absolute value.
+ /// v.select_nth_unstable_by_key(2, |a| a.abs());
+ ///
+ /// // We are only guaranteed the slice will be one of the following, based on the way we sort
+ /// // about the specified index.
+ /// assert!(v == [1, 2, -3, 4, -5] ||
+ /// v == [1, 2, -3, -5, 4] ||
+ /// v == [2, 1, -3, 4, -5] ||
+ /// v == [2, 1, -3, -5, 4]);
+ /// ```
+ #[stable(feature = "slice_select_nth_unstable", since = "1.49.0")]
+ #[inline]
+ pub fn select_nth_unstable_by_key<K, F>(
+ &mut self,
+ index: usize,
+ mut f: F,
+ ) -> (&mut [T], &mut T, &mut [T])
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ // Compare elements through their extracted keys; note that `f` runs twice
+ // per comparison.
+ let mut is_less = |a: &T, b: &T| f(a) < f(b);
+ sort::partition_at_index(self, index, &mut is_less)
+ }
+
+ /// Moves all consecutive repeated elements to the end of the slice according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// The result is a pair of slices: the first holds the retained elements, with no two
+ /// consecutive ones equal; the second holds every displaced duplicate, in no specified
+ /// order.
+ ///
+ /// When the input slice is sorted, the first returned slice is fully deduplicated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = [1, 2, 2, 3, 3, 2, 1, 1];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup();
+ ///
+ /// assert_eq!(dedup, [1, 2, 3, 2, 1]);
+ /// assert_eq!(duplicates, [2, 3, 1]);
+ /// ```
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup(&mut self) -> (&mut [T], &mut [T])
+ where
+ T: PartialEq,
+ {
+ // Delegate to the closure-based version, with `==` as the equality relation.
+ self.partition_dedup_by(|a, b| *a == *b)
+ }
+
+ /// Moves all but the first of consecutive elements to the end of the slice satisfying
+ /// a given equality relation.
+ ///
+ /// Returns two slices. The first contains no consecutive repeated elements.
+ /// The second contains all the duplicates in no specified order.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the slice and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is moved
+ /// at the end of the slice.
+ ///
+ /// If the slice is sorted, the first returned slice contains no duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = ["foo", "Foo", "BAZ", "Bar", "bar", "baz", "BAZ"];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(dedup, ["foo", "BAZ", "Bar", "baz"]);
+ /// assert_eq!(duplicates, ["bar", "Foo", "BAZ"]);
+ /// ```
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup_by<F>(&mut self, mut same_bucket: F) -> (&mut [T], &mut [T])
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ // Although we have a mutable reference to `self`, we cannot make
+ // *arbitrary* changes. The `same_bucket` calls could panic, so we
+ // must ensure that the slice is in a valid state at all times.
+ //
+ // The way that we handle this is by using swaps; we iterate
+ // over all the elements, swapping as we go so that at the end
+ // the elements we wish to keep are in the front, and those we
+ // wish to reject are at the back. We can then split the slice.
+ // This operation is still `O(n)`.
+ //
+ // Example: We start in this state, where `r` represents "next
+ // read" and `w` represents "next_write".
+ //
+ // r
+ // +---+---+---+---+---+---+
+ // | 0 | 1 | 1 | 2 | 3 | 3 |
+ // +---+---+---+---+---+---+
+ // w
+ //
+ // Comparing self[r] against self[w-1], this is not a duplicate, so
+ // we swap self[r] and self[w] (no effect as r==w) and then increment both
+ // r and w, leaving us with:
+ //
+ // r
+ // +---+---+---+---+---+---+
+ // | 0 | 1 | 1 | 2 | 3 | 3 |
+ // +---+---+---+---+---+---+
+ // w
+ //
+ // Comparing self[r] against self[w-1], this value is a duplicate,
+ // so we increment `r` but leave everything else unchanged:
+ //
+ // r
+ // +---+---+---+---+---+---+
+ // | 0 | 1 | 1 | 2 | 3 | 3 |
+ // +---+---+---+---+---+---+
+ // w
+ //
+ // Comparing self[r] against self[w-1], this is not a duplicate,
+ // so swap self[r] and self[w] and advance r and w:
+ //
+ // r
+ // +---+---+---+---+---+---+
+ // | 0 | 1 | 2 | 1 | 3 | 3 |
+ // +---+---+---+---+---+---+
+ // w
+ //
+ // Not a duplicate, repeat:
+ //
+ // r
+ // +---+---+---+---+---+---+
+ // | 0 | 1 | 2 | 3 | 1 | 3 |
+ // +---+---+---+---+---+---+
+ // w
+ //
+ // Duplicate, advance r. End of slice. Split at w.
+
+ let len = self.len();
+ if len <= 1 {
+ return (self, &mut []);
+ }
+
+ let ptr = self.as_mut_ptr();
+ let mut next_read: usize = 1;
+ let mut next_write: usize = 1;
+
+ // SAFETY: the `while` condition guarantees `next_read` and `next_write`
+ // are less than `len`, thus are inside `self`. `prev_ptr_write` points to
+ // one element before `ptr_write`, but `next_write` starts at 1, so
+ // `prev_ptr_write` is never less than 0 and is inside the slice.
+ // This fulfils the requirements for dereferencing `ptr_read`, `prev_ptr_write`
+ // and `ptr_write`, and for using `ptr.add(next_read)`, `ptr.add(next_write - 1)`
+ // and `prev_ptr_write.offset(1)`.
+ //
+ // `next_write` is also incremented at most once per loop, meaning
+ // no element is skipped when it may need to be swapped.
+ //
+ // `ptr_read` and `prev_ptr_write` never point to the same element. This
+ // is required for `&mut *ptr_read`, `&mut *prev_ptr_write` to be safe.
+ // The explanation is simply that `next_read >= next_write` is always true,
+ // thus `next_read > next_write - 1` is too.
+ unsafe {
+ // Avoid bounds checks by using raw pointers.
+ while next_read < len {
+ let ptr_read = ptr.add(next_read);
+ let prev_ptr_write = ptr.add(next_write - 1);
+ if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
+ if next_read != next_write {
+ let ptr_write = prev_ptr_write.offset(1);
+ mem::swap(&mut *ptr_read, &mut *ptr_write);
+ }
+ next_write += 1;
+ }
+ next_read += 1;
+ }
+ }
+
+ self.split_at_mut(next_write)
+ }
+
+ /// Moves all but the first of consecutive elements to the end of the slice that resolve
+ /// to the same key.
+ ///
+ /// The result is a pair of slices: the first holds the retained elements, with no two
+ /// consecutive ones sharing a key; the second holds every displaced duplicate, in no
+ /// specified order.
+ ///
+ /// When the input slice is sorted, the first returned slice is fully deduplicated.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_partition_dedup)]
+ ///
+ /// let mut slice = [10, 20, 21, 30, 30, 20, 11, 13];
+ ///
+ /// let (dedup, duplicates) = slice.partition_dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(dedup, [10, 20, 30, 20, 11]);
+ /// assert_eq!(duplicates, [21, 30, 13]);
+ /// ```
+ #[unstable(feature = "slice_partition_dedup", issue = "54279")]
+ #[inline]
+ pub fn partition_dedup_by_key<K, F>(&mut self, mut key: F) -> (&mut [T], &mut [T])
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ // Two neighbours belong to the same bucket when their extracted keys compare equal.
+ let mut same_key = |a: &mut T, b: &mut T| key(a) == key(b);
+ self.partition_dedup_by(&mut same_key)
+ }
+
+ /// Rotates the slice in-place such that the first `mid` elements of the
+ /// slice move to the end while the last `self.len() - mid` elements move to
+ /// the front. After calling `rotate_left`, the element previously at index
+ /// `mid` will become the first element in the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `mid` is greater than the length of the
+ /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op
+ /// rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes linear (in `self.len()`) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a.rotate_left(2);
+ /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
+ /// ```
+ ///
+ /// Rotating a subslice:
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a[1..5].rotate_left(1);
+ /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']);
+ /// ```
+ #[stable(feature = "slice_rotate", since = "1.26.0")]
+ pub fn rotate_left(&mut self, mid: usize) {
+ assert!(mid <= self.len());
+ // `k` is the number of trailing elements that rotate around to the front.
+ let k = self.len() - mid;
+ let p = self.as_mut_ptr();
+
+ // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
+ // valid for reading and writing, as required by `ptr_rotate`.
+ unsafe {
+ rotate::ptr_rotate(mid, p.add(mid), k);
+ }
+ }
+
+ /// Rotates the slice in-place such that the first `self.len() - k`
+ /// elements of the slice move to the end while the last `k` elements move
+ /// to the front. After calling `rotate_right`, the element previously at
+ /// index `self.len() - k` will become the first element in the slice.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if `k` is greater than the length of the
+ /// slice. Note that `k == self.len()` does _not_ panic and is a no-op
+ /// rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes linear (in `self.len()`) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a.rotate_right(2);
+ /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']);
+ /// ```
+ ///
+ /// Rotate a subslice:
+ ///
+ /// ```
+ /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
+ /// a[1..5].rotate_right(1);
+ /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']);
+ /// ```
+ #[stable(feature = "slice_rotate", since = "1.26.0")]
+ pub fn rotate_right(&mut self, k: usize) {
+ assert!(k <= self.len());
+ // `mid` is the number of leading elements that rotate around to the back;
+ // a right-rotation by `k` is a left-rotation by `len - k`.
+ let mid = self.len() - k;
+ let p = self.as_mut_ptr();
+
+ // SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
+ // valid for reading and writing, as required by `ptr_rotate`.
+ unsafe {
+ rotate::ptr_rotate(mid, p.add(mid), k);
+ }
+ }
+
+ /// Fills `self` with elements by cloning `value`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_fill)]
+ ///
+ /// let mut buf = vec![0; 10];
+ /// buf.fill(1);
+ /// assert_eq!(buf, vec![1; 10]);
+ /// ```
+ #[unstable(feature = "slice_fill", issue = "70758")]
+ pub fn fill(&mut self, value: T)
+ where
+ T: Clone,
+ {
+ // Clone `value` into every element except the last, then move `value`
+ // itself into the final slot — this saves one clone per call. On an
+ // empty slice `split_last_mut` returns `None` and nothing happens.
+ if let Some((last, elems)) = self.split_last_mut() {
+ for el in elems {
+ el.clone_from(&value);
+ }
+
+ *last = value
+ }
+ }
+
+ /// Copies the elements from `src` into `self`.
+ ///
+ /// The length of `src` must be the same as `self`.
+ ///
+ /// If `T` implements `Copy`, it can be more performant to use
+ /// [`copy_from_slice`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// Cloning two elements from a slice into another:
+ ///
+ /// ```
+ /// let src = [1, 2, 3, 4];
+ /// let mut dst = [0, 0];
+ ///
+ /// // Because the slices have to be the same length,
+ /// // we slice the source slice from four elements
+ /// // to two. It will panic if we don't do this.
+ /// dst.clone_from_slice(&src[2..]);
+ ///
+ /// assert_eq!(src, [1, 2, 3, 4]);
+ /// assert_eq!(dst, [3, 4]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference with no
+ /// immutable references to a particular piece of data in a particular
+ /// scope. Because of this, attempting to use `clone_from_slice` on a
+ /// single slice will result in a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// slice[..2].clone_from_slice(&slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.clone_from_slice(&right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 4, 5]);
+ /// ```
+ ///
+ /// [`copy_from_slice`]: #method.copy_from_slice
+ /// [`split_at_mut`]: #method.split_at_mut
+ #[stable(feature = "clone_from_slice", since = "1.7.0")]
+ pub fn clone_from_slice(&mut self, src: &[T])
+ where
+ T: Clone,
+ {
+ assert!(self.len() == src.len(), "destination and source slices have different lengths");
+ // NOTE: We need to explicitly slice them to the same length
+ // for bounds checking to be elided, and the optimizer will
+ // generate memcpy for simple cases (for example T = u8).
+ let len = self.len();
+ let src = &src[..len];
+ // `clone_from` lets `T` reuse the destination's existing resources
+ // (e.g. a `String`'s allocation) instead of always cloning afresh.
+ for i in 0..len {
+ self[i].clone_from(&src[i]);
+ }
+ }
+
+ /// Copies all elements from `src` into `self`, using a memcpy.
+ ///
+ /// The length of `src` must be the same as `self`.
+ ///
+ /// If `T` does not implement `Copy`, use [`clone_from_slice`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// Copying two elements from a slice into another:
+ ///
+ /// ```
+ /// let src = [1, 2, 3, 4];
+ /// let mut dst = [0, 0];
+ ///
+ /// // Because the slices have to be the same length,
+ /// // we slice the source slice from four elements
+ /// // to two. It will panic if we don't do this.
+ /// dst.copy_from_slice(&src[2..]);
+ ///
+ /// assert_eq!(src, [1, 2, 3, 4]);
+ /// assert_eq!(dst, [3, 4]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference with no
+ /// immutable references to a particular piece of data in a particular
+ /// scope. Because of this, attempting to use `copy_from_slice` on a
+ /// single slice will result in a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// slice[..2].copy_from_slice(&slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.copy_from_slice(&right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 4, 5]);
+ /// ```
+ ///
+ /// [`clone_from_slice`]: #method.clone_from_slice
+ /// [`split_at_mut`]: #method.split_at_mut
+ #[stable(feature = "copy_from_slice", since = "1.9.0")]
+ pub fn copy_from_slice(&mut self, src: &[T])
+ where
+ T: Copy,
+ {
+ // The panic code path was put into a cold function to not bloat the
+ // call site. `track_caller` makes the panic report the location of
+ // the `copy_from_slice` call rather than this helper.
+ #[inline(never)]
+ #[cold]
+ #[track_caller]
+ fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
+ panic!(
+ "source slice length ({}) does not match destination slice length ({})",
+ src_len, dst_len,
+ );
+ }
+
+ if self.len() != src.len() {
+ len_mismatch_fail(self.len(), src.len());
+ }
+
+ // SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
+ // checked to have the same length. The slices cannot overlap because
+ // mutable references are exclusive.
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
+ }
+ }
+
+ /// Copies elements from one part of the slice to another part of itself,
+ /// using a memmove.
+ ///
+ /// `src` is the range within `self` to copy from. `dest` is the starting
+ /// index of the range within `self` to copy to, which will have the same
+ /// length as `src`. The two ranges may overlap. The ends of the two ranges
+ /// must be less than or equal to `self.len()`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if either range exceeds the end of the slice,
+ /// or if the end of `src` is before the start.
+ ///
+ /// # Examples
+ ///
+ /// Copying four bytes within a slice:
+ ///
+ /// ```
+ /// let mut bytes = *b"Hello, World!";
+ ///
+ /// bytes.copy_within(1..5, 8);
+ ///
+ /// assert_eq!(&bytes, b"Hello, Wello!");
+ /// ```
+ #[stable(feature = "copy_within", since = "1.37.0")]
+ #[track_caller]
+ pub fn copy_within<R: RangeBounds<usize>>(&mut self, src: R, dest: usize)
+ where
+ T: Copy,
+ {
+ // `assert_len` panics on a decreasing or out-of-bounds range, so past this
+ // point `src_start <= src_end <= self.len()` holds and `count` cannot underflow.
+ let Range { start: src_start, end: src_end } = src.assert_len(self.len());
+ let count = src_end - src_start;
+ assert!(dest <= self.len() - count, "dest is out of bounds");
+ // SAFETY: the conditions for `ptr::copy` have all been checked above,
+ // as have those for `ptr::add`. `ptr::copy` permits overlapping ranges.
+ unsafe {
+ ptr::copy(self.as_ptr().add(src_start), self.as_mut_ptr().add(dest), count);
+ }
+ }
+
+ /// Swaps all elements in `self` with those in `other`.
+ ///
+ /// The length of `other` must be the same as `self`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Example
+ ///
+ /// Swapping two elements across slices:
+ ///
+ /// ```
+ /// let mut slice1 = [0, 0];
+ /// let mut slice2 = [1, 2, 3, 4];
+ ///
+ /// slice1.swap_with_slice(&mut slice2[2..]);
+ ///
+ /// assert_eq!(slice1, [3, 4]);
+ /// assert_eq!(slice2, [1, 2, 0, 0]);
+ /// ```
+ ///
+ /// Rust enforces that there can only be one mutable reference to a
+ /// particular piece of data in a particular scope. Because of this,
+ /// attempting to use `swap_with_slice` on a single slice will result in
+ /// a compile failure:
+ ///
+ /// ```compile_fail
+ /// let mut slice = [1, 2, 3, 4, 5];
+ /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail!
+ /// ```
+ ///
+ /// To work around this, we can use [`split_at_mut`] to create two distinct
+ /// mutable sub-slices from a slice:
+ ///
+ /// ```
+ /// let mut slice = [1, 2, 3, 4, 5];
+ ///
+ /// {
+ /// let (left, right) = slice.split_at_mut(2);
+ /// left.swap_with_slice(&mut right[1..]);
+ /// }
+ ///
+ /// assert_eq!(slice, [4, 5, 3, 1, 2]);
+ /// ```
+ ///
+ /// [`split_at_mut`]: #method.split_at_mut
+ #[stable(feature = "swap_with_slice", since = "1.27.0")]
+ pub fn swap_with_slice(&mut self, other: &mut [T]) {
+ assert!(self.len() == other.len(), "destination and source slices have different lengths");
+ // SAFETY: `self` is valid for `self.len()` elements by definition, and `other` was
+ // checked to have the same length. The slices cannot overlap because
+ // mutable references are exclusive.
+ unsafe {
+ ptr::swap_nonoverlapping(self.as_mut_ptr(), other.as_mut_ptr(), self.len());
+ }
+ }
+
+ /// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`.
+ fn align_to_offsets<U>(&self) -> (usize, usize) {
+ // What we are going to do about `rest` is figure out what multiple of `U`s we can put in a
+ // lowest number of `T`s. And how many `T`s we need for each such "multiple".
+ //
+ // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
+ // for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
+ // place of every 3 Ts in the `rest` slice. A bit more complicated.
+ //
+ // Formula to calculate this is:
+ //
+ // Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
+ // Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
+ //
+ // Expanded and simplified:
+ //
+ // Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
+ // Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
+ //
+ // Luckily since all this is constant-evaluated... performance here matters not!
+ #[inline]
+ fn gcd(a: usize, b: usize) -> usize {
+ use crate::intrinsics;
+ // iterative Stein's algorithm (binary GCD)
+ // We should still make this `const fn` (and revert to recursive algorithm if we do)
+ // because relying on llvm to consteval all this is… well, it makes me uncomfortable.
+
+ // SAFETY: `a` and `b` are checked to be non-zero values.
+ let (ctz_a, mut ctz_b) = unsafe {
+ if a == 0 {
+ return b;
+ }
+ if b == 0 {
+ return a;
+ }
+ (intrinsics::cttz_nonzero(a), intrinsics::cttz_nonzero(b))
+ };
+ // `k` is the shared power of two; it is reinstated at the end.
+ let k = ctz_a.min(ctz_b);
+ let mut a = a >> ctz_a;
+ let mut b = b;
+ loop {
+ // remove all factors of 2 from b
+ b >>= ctz_b;
+ if a > b {
+ mem::swap(&mut a, &mut b);
+ }
+ b = b - a;
+ // SAFETY: `b` is checked to be non-zero.
+ unsafe {
+ if b == 0 {
+ break;
+ }
+ ctz_b = intrinsics::cttz_nonzero(b);
+ }
+ }
+ a << k
+ }
+ let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
+ let ts: usize = mem::size_of::<U>() / gcd;
+ let us: usize = mem::size_of::<T>() / gcd;
+
+ // Armed with this knowledge, we can find how many `U`s we can fit!
+ let us_len = self.len() / ts * us;
+ // And how many `T`s will be in the trailing slice!
+ let ts_len = self.len() % ts;
+ (us_len, ts_len)
+ }
+
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
+ /// maintained.
+ ///
+ /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
+ /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
+ /// length possible for a given type and input slice, but only your algorithm's performance
+ /// should depend on that, not its correctness. It is permissible for all of the input data to
+ /// be returned as the prefix or suffix slice.
+ ///
+ /// This method has no purpose when either input element `T` or output element `U` are
+ /// zero-sized and will return the original slice without splitting anything.
+ ///
+ /// # Safety
+ ///
+ /// This method is essentially a `transmute` with respect to the elements in the returned
+ /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// unsafe {
+ /// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let (prefix, shorts, suffix) = bytes.align_to::<u16>();
+ /// // less_efficient_algorithm_for_bytes(prefix);
+ /// // more_efficient_algorithm_for_aligned_shorts(shorts);
+ /// // less_efficient_algorithm_for_bytes(suffix);
+ /// }
+ /// ```
+ #[stable(feature = "slice_align_to", since = "1.30.0")]
+ pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
+ // Note that most of this function will be constant-evaluated,
+ if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+ // handle ZSTs specially, which is – don't handle them at all.
+ return (self, &[], &[]);
+ }
+
+ // First, find at what point do we split between the first and 2nd slice. Easy with
+ // ptr.align_offset.
+ let ptr = self.as_ptr();
+ // SAFETY: See the `align_to_mut` method for the detailed safety comment.
+ let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+ // `align_offset` returns `usize::MAX` when alignment cannot be reached; that
+ // value (or any offset past the end) means no suitably aligned element exists
+ // within the slice, so all of the input is returned as the prefix.
+ if offset > self.len() {
+ (self, &[], &[])
+ } else {
+ let (left, rest) = self.split_at(offset);
+ let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
+ // since the caller guarantees that we can transmute `T` to `U` safely.
+ unsafe {
+ (
+ left,
+ from_raw_parts(rest.as_ptr() as *const U, us_len),
+ from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
+ )
+ }
+ }
+ }
+
+ /// Transmute the slice to a slice of another type, ensuring alignment of the types is
+ /// maintained.
+ ///
+ /// This method splits the slice into three distinct slices: prefix, correctly aligned middle
+ /// slice of a new type, and the suffix slice. The method may make the middle slice the greatest
+ /// length possible for a given type and input slice, but only your algorithm's performance
+ /// should depend on that, not its correctness. It is permissible for all of the input data to
+ /// be returned as the prefix or suffix slice.
+ ///
+ /// This method has no purpose when either input element `T` or output element `U` are
+ /// zero-sized and will return the original slice without splitting anything.
+ ///
+ /// # Safety
+ ///
+ /// This method is essentially a `transmute` with respect to the elements in the returned
+ /// middle slice, so all the usual caveats pertaining to `transmute::<T, U>` also apply here.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// unsafe {
+ /// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7];
+ /// let (prefix, shorts, suffix) = bytes.align_to_mut::<u16>();
+ /// // less_efficient_algorithm_for_bytes(prefix);
+ /// // more_efficient_algorithm_for_aligned_shorts(shorts);
+ /// // less_efficient_algorithm_for_bytes(suffix);
+ /// }
+ /// ```
+ #[stable(feature = "slice_align_to", since = "1.30.0")]
+ pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
+ // Note that most of this function will be constant-evaluated,
+ if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+ // handle ZSTs specially, which is – don't handle them at all.
+ return (self, &mut [], &mut []);
+ }
+
+ // First, find at what point do we split between the first and 2nd slice. Easy with
+ // ptr.align_offset.
+ let ptr = self.as_ptr();
+ // SAFETY: Here we are ensuring we will use aligned pointers for U for the
+ // rest of the method. This is done by passing a pointer to &[T] with an
+ // alignment targeted for U.
+ // `crate::ptr::align_offset` is called with a correctly aligned and
+ // valid pointer `ptr` (it comes from a reference to `self`) and with
+ // a size that is a power of two (since it comes from the alignment for U),
+ // satisfying its safety constraints.
+ let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+ // `align_offset` returns `usize::MAX` when alignment cannot be reached; that
+ // value (or any offset past the end) means no suitably aligned element exists
+ // within the slice, so all of the input is returned as the prefix.
+ if offset > self.len() {
+ (self, &mut [], &mut [])
+ } else {
+ let (left, rest) = self.split_at_mut(offset);
+ let (us_len, ts_len) = rest.align_to_offsets::<U>();
+ let rest_len = rest.len();
+ let mut_ptr = rest.as_mut_ptr();
+ // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
+ // SAFETY: see comments for `align_to`.
+ unsafe {
+ (
+ left,
+ from_raw_parts_mut(mut_ptr as *mut U, us_len),
+ from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
+ )
+ }
+ }
+ }
+
+ /// Checks if the elements of this slice are sorted.
+ ///
+ /// Returns `true` when every adjacent pair of elements `(a, b)` satisfies `a <= b`; a slice
+ /// with zero or one element is trivially sorted.
+ ///
+ /// When `T` is only `PartialOrd` and not `Ord`, any pair of consecutive elements that are
+ /// incomparable makes this function return `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ /// let empty: [i32; 0] = [];
+ ///
+ /// assert!([1, 2, 2, 9].is_sorted());
+ /// assert!(![1, 3, 2, 4].is_sorted());
+ /// assert!([0].is_sorted());
+ /// assert!(empty.is_sorted());
+ /// assert!(![0.0, 1.0, f32::NAN].is_sorted());
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ pub fn is_sorted(&self) -> bool
+ where
+ T: PartialOrd,
+ {
+ // Sortedness under `PartialOrd` is exactly sortedness under `partial_cmp`.
+ self.is_sorted_by(PartialOrd::partial_cmp)
+ }
+
+ /// Checks if the elements of this slice are sorted using the given comparator function.
+ ///
+ /// Works like [`is_sorted`], except that consecutive elements are ordered by the supplied
+ /// `compare` closure instead of `PartialOrd::partial_cmp`; see [`is_sorted`] for the full
+ /// contract.
+ ///
+ /// [`is_sorted`]: #method.is_sorted
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
+ where
+ F: FnMut(&T, &T) -> Option<Ordering>,
+ {
+ // The iterator yields `&T`, so its `is_sorted_by` hands this closure `&&T`
+ // pairs; strip one level of reference via the closure's patterns.
+ self.iter().is_sorted_by(|&a, &b| compare(a, b))
+ }
+
+ /// Checks if the elements of this slice are sorted using the given key extraction function.
+ ///
+ /// Works like [`is_sorted`], except that elements are compared by the keys produced by `f`
+ /// rather than directly; see [`is_sorted`] for the full contract.
+ ///
+ /// [`is_sorted`]: #method.is_sorted
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(is_sorted)]
+ ///
+ /// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
+ /// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
+ /// ```
+ #[inline]
+ #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
+ where
+ F: FnMut(&T) -> K,
+ K: PartialOrd,
+ {
+ // Mapping every element through `f` reduces this to `is_sorted` on the keys.
+ self.iter().map(f).is_sorted()
+ }
+
+ /// Returns the index of the partition point according to the given predicate
+ /// (the index of the first element of the second partition).
+ ///
+ /// The slice is assumed to be partitioned according to the given predicate.
+ /// This means that all elements for which the predicate returns true are at the start of the slice
+ /// and all elements for which the predicate returns false are at the end.
+ /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
+ /// (all odd numbers are at the start, all even at the end).
+ ///
+ /// If this slice is not partitioned, the returned result is unspecified and meaningless,
+ /// as this method performs a kind of binary search.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(partition_point)]
+ ///
+ /// let v = [1, 2, 3, 3, 5, 6, 7];
+ /// let i = v.partition_point(|&x| x < 5);
+ ///
+ /// assert_eq!(i, 4);
+ /// assert!(v[..i].iter().all(|&x| x < 5));
+ /// assert!(v[i..].iter().all(|&x| !(x < 5)));
+ /// ```
+ #[unstable(feature = "partition_point", reason = "new API", issue = "73831")]
+ pub fn partition_point<P>(&self, mut pred: P) -> usize
+ where
+ P: FnMut(&T) -> bool,
+ {
+ let mut left = 0;
+ let mut right = self.len();
+
+ while left != right {
+ let mid = left + (right - left) / 2;
+ // SAFETY: When `left < right`, `left <= mid < right`.
+ // Therefore `left` always increases and `right` always decreases,
+ // and either of them is selected. In both cases `left <= right` is
+ // satisfied. Therefore if `left < right` in a step, `left <= right`
+ // is satisfied in the next step. Therefore as long as `left != right`,
+ // `0 <= left < right <= len` is satisfied and in this case
+ // `0 <= mid < len` is satisfied too.
+ let value = unsafe { self.get_unchecked(mid) };
+ if pred(value) {
+ left = mid + 1;
+ } else {
+ right = mid;
+ }
+ }
+
+ left
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for &[T] {
+ /// Creates an empty slice.
+ fn default() -> Self {
+ // An empty slice borrows no data, so it can be produced for any lifetime.
+ &[]
+ }
+}
+
#[stable(feature = "mut_slice_default", since = "1.5.0")]
impl<T> Default for &mut [T] {
    /// Creates a mutable empty slice.
    ///
    /// Equivalent to `&mut []`.
    fn default() -> Self {
        &mut []
    }
}
--- /dev/null
+//! Free functions to create `&[T]` and `&mut [T]`.
+
+use crate::array;
+use crate::intrinsics::is_aligned_and_not_null;
+use crate::mem;
+use crate::ptr;
+
/// Forms a slice from a pointer and a length.
///
/// The `len` argument is the number of **elements**, not the number of bytes.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for reads for `len * mem::size_of::<T>()` many bytes,
///   and it must be properly aligned. This means in particular:
///
///     * The entire memory range of this slice must be contained within a single allocated object!
///       Slices can never span across multiple allocated objects. See [below](#incorrect-usage)
///       for an example incorrectly not taking this into account.
///     * `data` must be non-null and aligned even for zero-length slices. One
///       reason for this is that enum layout optimizations may rely on references
///       (including slices of any length) being aligned and non-null to distinguish
///       them from other data. You can obtain a pointer that is usable as `data`
///       for zero-length slices using [`NonNull::dangling()`].
///
/// * `data` must point to `len` consecutive properly initialized values of type `T`.
///
/// * The memory referenced by the returned slice must not be mutated for the duration
///   of lifetime `'a`, except inside an `UnsafeCell`.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
///   See the safety documentation of [`pointer::offset`].
///
/// # Caveat
///
/// The lifetime for the returned slice is inferred from its usage. To
/// prevent accidental misuse, it's suggested to tie the lifetime to whichever
/// source lifetime is safe in the context, such as by providing a helper
/// function taking the lifetime of a host value for the slice, or by explicit
/// annotation.
///
/// # Examples
///
/// ```
/// use std::slice;
///
/// // manifest a slice for a single element
/// let x = 42;
/// let ptr = &x as *const _;
/// let slice = unsafe { slice::from_raw_parts(ptr, 1) };
/// assert_eq!(slice[0], 42);
/// ```
///
/// ### Incorrect usage
///
/// The following `join_slices` function is **unsound** ⚠️
///
/// ```rust,no_run
/// use std::slice;
///
/// fn join_slices<'a, T>(fst: &'a [T], snd: &'a [T]) -> &'a [T] {
///     let fst_end = fst.as_ptr().wrapping_add(fst.len());
///     let snd_start = snd.as_ptr();
///     assert_eq!(fst_end, snd_start, "Slices must be contiguous!");
///     unsafe {
///         // The assertion above ensures `fst` and `snd` are contiguous, but they might
///         // still be contained within _different allocated objects_, in which case
///         // creating this slice is undefined behavior.
///         slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len())
///     }
/// }
///
/// fn main() {
///     // `a` and `b` are different allocated objects...
///     let a = 42;
///     let b = 27;
///     // ... which may nevertheless be laid out contiguously in memory: | a | b |
///     let _ = join_slices(slice::from_ref(&a), slice::from_ref(&b)); // UB
/// }
/// ```
///
/// [valid]: ptr#safety
/// [`NonNull::dangling()`]: ptr::NonNull::dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
    // These are best-effort sanity checks only (compiled out in release builds);
    // upholding the full contract above remains the caller's responsibility.
    debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
    debug_assert!(
        mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
        "attempt to create slice covering at least half the address space"
    );
    // SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
    unsafe { &*ptr::slice_from_raw_parts(data, len) }
}
+
/// Performs the same functionality as [`from_raw_parts`], except that a
/// mutable slice is returned.
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `data` must be [valid] for both reads and writes for `len * mem::size_of::<T>()` many bytes,
///   and it must be properly aligned. This means in particular:
///
///     * The entire memory range of this slice must be contained within a single allocated object!
///       Slices can never span across multiple allocated objects.
///     * `data` must be non-null and aligned even for zero-length slices. One
///       reason for this is that enum layout optimizations may rely on references
///       (including slices of any length) being aligned and non-null to distinguish
///       them from other data. You can obtain a pointer that is usable as `data`
///       for zero-length slices using [`NonNull::dangling()`].
///
/// * `data` must point to `len` consecutive properly initialized values of type `T`.
///
/// * The memory referenced by the returned slice must not be accessed through any other pointer
///   (not derived from the return value) for the duration of lifetime `'a`.
///   Both read and write accesses are forbidden.
///
/// * The total size `len * mem::size_of::<T>()` of the slice must be no larger than `isize::MAX`.
///   See the safety documentation of [`pointer::offset`].
///
/// [valid]: ptr#safety
/// [`NonNull::dangling()`]: ptr::NonNull::dangling
/// [`pointer::offset`]: ../../std/primitive.pointer.html#method.offset
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
    // Best-effort sanity checks only (debug builds); the contract is on the caller.
    debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
    debug_assert!(
        mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
        "attempt to create slice covering at least half the address space"
    );
    // SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
    unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
}
+
+/// Converts a reference to T into a slice of length 1 (without copying).
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub fn from_ref<T>(s: &T) -> &[T] {
+ array::from_ref(s)
+}
+
+/// Converts a reference to T into a slice of length 1 (without copying).
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub fn from_mut<T>(s: &mut T) -> &mut [T] {
+ array::from_mut(s)
+}
--- /dev/null
+// ignore-tidy-undocumented-unsafe
+
+use crate::cmp;
+use crate::mem::{self, MaybeUninit};
+use crate::ptr;
+
/// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first
/// element. Equivalently, rotates the range `left` elements to the left or `right` elements to the
/// right.
///
/// # Safety
///
/// The specified range must be valid for reading and writing.
///
/// # Algorithm
///
/// Algorithm 1 is used for small values of `left + right` or for large `T`. The elements are moved
/// into their final positions one at a time starting at `mid - left` and advancing by `right` steps
/// modulo `left + right`, such that only one temporary is needed. Eventually, we arrive back at
/// `mid - left`. However, if `gcd(left + right, right)` is not 1, the above steps skipped over
/// elements. For example:
/// ```text
/// left = 10, right = 6
/// the `^` indicates an element in its final place
/// 6 7 8 9 10 11 12 13 14 15 . 0 1 2 3 4 5
/// after using one step of the above algorithm (The X will be overwritten at the end of the round,
/// and 12 is stored in a temporary):
/// X 7 8 9 10 11 6 13 14 15 . 0 1 2 3 4 5
///               ^
/// after using another step (now 2 is in the temporary):
/// X 7 8 9 10 11 6 13 14 15 . 0 1 12 3 4 5
///               ^                 ^
/// after the third step (the steps wrap around, and 8 is in the temporary):
/// X 7 2 9 10 11 6 13 14 15 . 0 1 12 3 4 5
///     ^         ^                 ^
/// after 7 more steps, the round ends with the temporary 0 getting put in the X:
/// 0 7 2 9 4 11 6 13 8 15 . 10 1 12 3 14 5
/// ^   ^   ^    ^    ^       ^    ^    ^
/// ```
/// Fortunately, the number of skipped over elements between finalized elements is always equal, so
/// we can just offset our starting position and do more rounds (the total number of rounds is the
/// `gcd(left + right, right)` value). The end result is that all elements are finalized once and
/// only once.
///
/// Algorithm 2 is used if `left + right` is large but `min(left, right)` is small enough to
/// fit onto a stack buffer. The `min(left, right)` elements are copied onto the buffer, `memmove`
/// is applied to the others, and the ones on the buffer are moved back into the hole on the
/// opposite side of where they originated.
///
/// Algorithms that can be vectorized outperform the above once `left + right` becomes large enough.
/// Algorithm 1 can be vectorized by chunking and performing many rounds at once, but there are too
/// few rounds on average until `left + right` is enormous, and the worst case of a single
/// round is always there. Instead, algorithm 3 utilizes repeated swapping of
/// `min(left, right)` elements until a smaller rotate problem is left.
///
/// ```text
/// left = 11, right = 4
/// [4 5 6 7 8 9 10 11 12 13 14 . 0 1 2 3]
///                  ^  ^  ^  ^   ^ ^ ^ ^ swapping the rightmost elements with elements to the left
/// [4 5 6 7 8 9 10 . 0 1 2 3] 11 12 13 14
///      ^ ^ ^  ^   ^ ^ ^ ^ swapping these
/// [4 5 6 . 0 1 2 3] 7 8 9 10 11 12 13 14
/// we cannot swap any more, but a smaller rotation problem is left to solve
/// ```
/// when `left < right` the swapping happens from the left instead.
pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
    type BufType = [usize; 32];
    if mem::size_of::<T>() == 0 {
        // Zero-sized elements carry no data to move; rotation is a no-op.
        return;
    }
    loop {
        // N.B. the below algorithms can fail if these cases are not checked
        if (right == 0) || (left == 0) {
            return;
        }
        if (left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()) {
            // Algorithm 1
            // Microbenchmarks indicate that the average performance for random shifts is better all
            // the way until about `left + right == 32`, but the worst case performance breaks even
            // around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
            // `usize`s, this algorithm also outperforms other algorithms.
            // SAFETY: the caller guarantees `[mid - left, mid + right)` is valid for reading and
            // writing, so `mid - left` (the start of the range) is in bounds.
            let x = unsafe { mid.sub(left) };
            // beginning of first round
            let mut tmp: T = unsafe { x.read() };
            let mut i = right;
            // `gcd` can be found beforehand by calculating `gcd(left + right, right)`,
            // but it is faster to do one loop which calculates the gcd as a side effect, then
            // doing the rest of the chunk
            let mut gcd = right;
            // benchmarks reveal that it is faster to swap temporaries all the way through instead
            // of reading one temporary once, copying backwards, and then writing that temporary at
            // the very end. This is possibly due to the fact that swapping or replacing temporaries
            // uses only one memory address in the loop instead of needing to manage two.
            loop {
                tmp = unsafe { x.add(i).replace(tmp) };
                // instead of incrementing `i` and then checking if it is outside the bounds, we
                // check if `i` will go outside the bounds on the next increment. This prevents
                // any wrapping of pointers or `usize`.
                if i >= left {
                    i -= left;
                    if i == 0 {
                        // end of first round
                        unsafe { x.write(tmp) };
                        break;
                    }
                    // this conditional must be here if `left + right >= 15`
                    if i < gcd {
                        gcd = i;
                    }
                } else {
                    i += right;
                }
            }
            // finish the chunk with more rounds
            for start in 1..gcd {
                tmp = unsafe { x.add(start).read() };
                i = start + right;
                loop {
                    tmp = unsafe { x.add(i).replace(tmp) };
                    if i >= left {
                        i -= left;
                        if i == start {
                            unsafe { x.add(start).write(tmp) };
                            break;
                        }
                    } else {
                        i += right;
                    }
                }
            }
            return;
        // `T` is not a zero-sized type, so it's okay to divide by its size.
        } else if cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>() {
            // Algorithm 2
            // The `[T; 0]` here is to ensure this is appropriately aligned for T
            let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
            let buf = rawarray.as_mut_ptr() as *mut T;
            // `dim == mid - left + right`: the post-rotation position of the block that
            // starts at `mid - left`.
            let dim = unsafe { mid.sub(left).add(right) };
            if left <= right {
                // SAFETY: `buf` holds at least `min(left, right)` elements (checked above), and
                // the caller guarantees the whole range is valid for reads and writes.
                unsafe {
                    ptr::copy_nonoverlapping(mid.sub(left), buf, left);
                    ptr::copy(mid, mid.sub(left), right);
                    ptr::copy_nonoverlapping(buf, dim, left);
                }
            } else {
                unsafe {
                    ptr::copy_nonoverlapping(mid, buf, right);
                    ptr::copy(mid.sub(left), dim, left);
                    ptr::copy_nonoverlapping(buf, mid.sub(left), right);
                }
            }
            return;
        } else if left >= right {
            // Algorithm 3
            // There is an alternate way of swapping that involves finding where the last swap
            // of this algorithm would be, and swapping using that last chunk instead of swapping
            // adjacent chunks like this algorithm is doing, but this way is still faster.
            loop {
                unsafe {
                    ptr::swap_nonoverlapping(mid.sub(right), mid, right);
                    mid = mid.sub(right);
                }
                left -= right;
                if left < right {
                    break;
                }
            }
        } else {
            // Algorithm 3, `left < right`
            loop {
                unsafe {
                    ptr::swap_nonoverlapping(mid.sub(left), mid, left);
                    mid = mid.add(left);
                }
                right -= left;
                if right < left {
                    break;
                }
            }
        }
    }
}
--- /dev/null
+//! Slice sorting
+//!
+//! This module contains a sorting algorithm based on Orson Peters' pattern-defeating quicksort,
+//! published at: <https://github.com/orlp/pdqsort>
+//!
+//! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our
+//! stable sorting implementation.
+
+// ignore-tidy-undocumented-unsafe
+
+use crate::cmp;
+use crate::mem::{self, MaybeUninit};
+use crate::ptr;
+
/// When dropped, copies from `src` into `dest`.
struct CopyOnDrop<T> {
    // Pointer to the value that is copied when the guard drops.
    src: *mut T,
    // Pointer to the location that receives the copy.
    dest: *mut T,
}
+
impl<T> Drop for CopyOnDrop<T> {
    fn drop(&mut self) {
        // SAFETY: This is a helper class.
        // Please refer to its usage for correctness.
        // Namely, one must be sure that `src` and `dest` do not overlap as required by
        // `ptr::copy_nonoverlapping`, and that both are valid for a one-element copy
        // whenever the guard may drop.
        unsafe {
            ptr::copy_nonoverlapping(self.src, self.dest, 1);
        }
    }
}
+
/// Shifts the first element to the right until it encounters a greater or equal element.
fn shift_head<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check
    // (`get_unchecked` and `get_unchecked_mut`) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // a. Indexing:
    //  1. We checked the size of the array to be >= 2.
    //  2. All the indexing that we will do is always between `0 <= index < len` at most.
    //
    // b. Memory copying
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
    //     Namely, `i` and `i - 1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the first two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) {
            // Read the first element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(0)));
            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.get_unchecked_mut(1) };
            ptr::copy_nonoverlapping(v.get_unchecked(1), v.get_unchecked_mut(0), 1);

            for i in 2..len {
                if !is_less(v.get_unchecked(i), &*tmp) {
                    break;
                }

                // Move `i`-th element one place to the left, thus shifting the hole to the right.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i - 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}
+
/// Shifts the last element to the left until it encounters a smaller or equal element.
fn shift_tail<T, F>(v: &mut [T], is_less: &mut F)
where
    F: FnMut(&T, &T) -> bool,
{
    let len = v.len();
    // SAFETY: The unsafe operations below involve indexing without a bounds check
    // (`get_unchecked` and `get_unchecked_mut`) and copying memory (`ptr::copy_nonoverlapping`).
    //
    // a. Indexing:
    //  1. We checked the size of the array to be >= 2.
    //  2. All the indexing that we will do is always between `0 <= index < len - 1` at most.
    //
    // b. Memory copying
    //  1. We are obtaining pointers to references which are guaranteed to be valid.
    //  2. They cannot overlap because we obtain pointers to different indices of the slice.
    //     Namely, `i` and `i + 1`.
    //  3. If the slice is properly aligned, the elements are properly aligned.
    //     It is the caller's responsibility to make sure the slice is properly aligned.
    //
    // See comments below for further detail.
    unsafe {
        // If the last two elements are out-of-order...
        if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) {
            // Read the last element into a stack-allocated variable. If a following comparison
            // operation panics, `hole` will get dropped and automatically write the element back
            // into the slice.
            let mut tmp = mem::ManuallyDrop::new(ptr::read(v.get_unchecked(len - 1)));
            let mut hole = CopyOnDrop { src: &mut *tmp, dest: v.get_unchecked_mut(len - 2) };
            ptr::copy_nonoverlapping(v.get_unchecked(len - 2), v.get_unchecked_mut(len - 1), 1);

            for i in (0..len - 2).rev() {
                if !is_less(&*tmp, v.get_unchecked(i)) {
                    break;
                }

                // Move `i`-th element one place to the right, thus shifting the hole to the left.
                ptr::copy_nonoverlapping(v.get_unchecked(i), v.get_unchecked_mut(i + 1), 1);
                hole.dest = v.get_unchecked_mut(i);
            }
            // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
        }
    }
}
+
/// Partially sorts a slice by shifting several out-of-order elements around.
///
/// Returns `true` if the slice is sorted at the end. This function is *O*(*n*) worst-case.
#[cold]
fn partial_insertion_sort<T, F>(v: &mut [T], is_less: &mut F) -> bool
where
    F: FnMut(&T, &T) -> bool,
{
    // Maximum number of adjacent out-of-order pairs that will get shifted.
    const MAX_STEPS: usize = 5;
    // If the slice is shorter than this, don't shift any elements.
    const SHORTEST_SHIFTING: usize = 50;

    let len = v.len();
    // `i` persists across steps: each iteration resumes scanning where the previous one stopped.
    let mut i = 1;

    for _ in 0..MAX_STEPS {
        // SAFETY: We already explicitly did the bound checking with `i < len`.
        // All our subsequent indexing is only in the range `0 <= index < len`.
        unsafe {
            // Find the next pair of adjacent out-of-order elements.
            while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) {
                i += 1;
            }
        }

        // Are we done?
        if i == len {
            return true;
        }

        // Don't shift elements on short arrays, as that has a performance cost.
        if len < SHORTEST_SHIFTING {
            return false;
        }

        // Swap the found pair of elements. This puts them in correct order.
        v.swap(i - 1, i);

        // Shift the smaller element to the left.
        shift_tail(&mut v[..i], is_less);
        // Shift the greater element to the right.
        shift_head(&mut v[i..], is_less);
    }

    // Didn't manage to sort the slice in the limited number of steps.
    false
}
+
+/// Sorts a slice using insertion sort, which is *O*(*n*^2) worst-case.
+fn insertion_sort<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ for i in 1..v.len() {
+ shift_tail(&mut v[..i + 1], is_less);
+ }
+}
+
+/// Sorts `v` using heapsort, which guarantees *O*(*n* \* log(*n*)) worst-case.
+#[cold]
+#[unstable(feature = "sort_internals", reason = "internal to sort module", issue = "none")]
+pub fn heapsort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // This binary heap respects the invariant `parent >= child`.
+ let mut sift_down = |v: &mut [T], mut node| {
+ loop {
+ // Children of `node`:
+ let left = 2 * node + 1;
+ let right = 2 * node + 2;
+
+ // Choose the greater child.
+ let greater =
+ if right < v.len() && is_less(&v[left], &v[right]) { right } else { left };
+
+ // Stop if the invariant holds at `node`.
+ if greater >= v.len() || !is_less(&v[node], &v[greater]) {
+ break;
+ }
+
+ // Swap `node` with the greater child, move one step down, and continue sifting.
+ v.swap(node, greater);
+ node = greater;
+ }
+ };
+
+ // Build the heap in linear time.
+ for i in (0..v.len() / 2).rev() {
+ sift_down(v, i);
+ }
+
+ // Pop maximal elements from the heap.
+ for i in (1..v.len()).rev() {
+ v.swap(0, i);
+ sift_down(&mut v[..i], 0);
+ }
+}
+
/// Partitions `v` into elements smaller than `pivot`, followed by elements greater than or equal
/// to `pivot`.
///
/// Returns the number of elements smaller than `pivot`.
///
/// Partitioning is performed block-by-block in order to minimize the cost of branching operations.
/// This idea is presented in the [BlockQuicksort][pdf] paper.
///
/// [pdf]: http://drops.dagstuhl.de/opus/volltexte/2016/6389/pdf/LIPIcs-ESA-2016-38.pdf
fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
where
    F: FnMut(&T, &T) -> bool,
{
    // Number of elements in a typical block.
    const BLOCK: usize = 128;

    // The partitioning algorithm repeats the following steps until completion:
    //
    // 1. Trace a block from the left side to identify elements greater than or equal to the pivot.
    // 2. Trace a block from the right side to identify elements smaller than the pivot.
    // 3. Exchange the identified elements between the left and right side.
    //
    // We keep the following variables for a block of elements:
    //
    // 1. `block` - Number of elements in the block.
    // 2. `start` - Start pointer into the `offsets` array.
    // 3. `end` - End pointer into the `offsets` array.
    // 4. `offsets` - Indices of out-of-order elements within the block.

    // The current block on the left side (from `l` to `l.add(block_l)`).
    let mut l = v.as_mut_ptr();
    let mut block_l = BLOCK;
    let mut start_l = ptr::null_mut();
    let mut end_l = ptr::null_mut();
    let mut offsets_l = [MaybeUninit::<u8>::uninit(); BLOCK];

    // The current block on the right side (from `r.sub(block_r)` to `r`).
    // SAFETY: The documentation for .add() specifically mentions that
    // `vec.as_ptr().add(vec.len())` is always safe.
    let mut r = unsafe { l.add(v.len()) };
    let mut block_r = BLOCK;
    let mut start_r = ptr::null_mut();
    let mut end_r = ptr::null_mut();
    let mut offsets_r = [MaybeUninit::<u8>::uninit(); BLOCK];

    // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
    // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.

    // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
    fn width<T>(l: *mut T, r: *mut T) -> usize {
        assert!(mem::size_of::<T>() > 0);
        (r as usize - l as usize) / mem::size_of::<T>()
    }

    loop {
        // We are done with partitioning block-by-block when `l` and `r` get very close. Then we do
        // some patch-up work in order to partition the remaining elements in between.
        let is_done = width(l, r) <= 2 * BLOCK;

        if is_done {
            // Number of remaining elements (still not compared to the pivot).
            let mut rem = width(l, r);
            if start_l < end_l || start_r < end_r {
                rem -= BLOCK;
            }

            // Adjust block sizes so that the left and right block don't overlap, but get perfectly
            // aligned to cover the whole remaining gap.
            if start_l < end_l {
                block_r = rem;
            } else if start_r < end_r {
                block_l = rem;
            } else {
                block_l = rem / 2;
                block_r = rem - block_l;
            }
            debug_assert!(block_l <= BLOCK && block_r <= BLOCK);
            debug_assert!(width(l, r) == block_l + block_r);
        }

        if start_l == end_l {
            // Trace `block_l` elements from the left side.
            start_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
            end_l = MaybeUninit::slice_as_mut_ptr(&mut offsets_l);
            let mut elem = l;

            for i in 0..block_l {
                // SAFETY: The unsafe operations below involve the usage of the `offset`.
                // According to the conditions required by the function, we satisfy them because:
                // 1. `offsets_l` is stack-allocated, and thus considered separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_l` will be `<= BLOCK`.
                //    Plus, `end_l` was initially set to the begin pointer of `offsets_` which was declared on the stack.
                //    Thus, we know that even in the worst case (all invocations of `is_less` returns false) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially the begin pointer to the slice which is always valid.
                unsafe {
                    // Branchless comparison.
                    *end_l = i as u8;
                    end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
                    elem = elem.offset(1);
                }
            }
        }

        if start_r == end_r {
            // Trace `block_r` elements from the right side.
            start_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
            end_r = MaybeUninit::slice_as_mut_ptr(&mut offsets_r);
            let mut elem = r;

            for i in 0..block_r {
                // SAFETY: The unsafe operations below involve the usage of the `offset`.
                // According to the conditions required by the function, we satisfy them because:
                // 1. `offsets_r` is stack-allocated, and thus considered separate allocated object.
                // 2. The function `is_less` returns a `bool`.
                //    Casting a `bool` will never overflow `isize`.
                // 3. We have guaranteed that `block_r` will be `<= BLOCK`.
                //    Plus, `end_r` was initially set to the begin pointer of `offsets_` which was declared on the stack.
                //    Thus, we know that even in the worst case (all invocations of `is_less` returns true) we will only be at most 1 byte past the end.
                // Another unsafe operation here is dereferencing `elem`.
                // However, `elem` was initially `1 * sizeof(T)` past the end and we decrement it by `1 * sizeof(T)` before accessing it.
                // Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
                unsafe {
                    // Branchless comparison.
                    elem = elem.offset(-1);
                    *end_r = i as u8;
                    end_r = end_r.offset(is_less(&*elem, pivot) as isize);
                }
            }
        }

        // Number of out-of-order elements to swap between the left and right side.
        let count = cmp::min(width(start_l, end_l), width(start_r, end_r));

        if count > 0 {
            macro_rules! left {
                () => {
                    l.offset(*start_l as isize)
                };
            }
            macro_rules! right {
                () => {
                    r.offset(-(*start_r as isize) - 1)
                };
            }

            // Instead of swapping one pair at a time, it is more efficient to perform a cyclic
            // permutation. This is not strictly equivalent to swapping, but produces a similar
            // result using fewer memory operations.
            unsafe {
                let tmp = ptr::read(left!());
                ptr::copy_nonoverlapping(right!(), left!(), 1);

                for _ in 1..count {
                    start_l = start_l.offset(1);
                    ptr::copy_nonoverlapping(left!(), right!(), 1);
                    start_r = start_r.offset(1);
                    ptr::copy_nonoverlapping(right!(), left!(), 1);
                }

                ptr::copy_nonoverlapping(&tmp, right!(), 1);
                mem::forget(tmp);
                start_l = start_l.offset(1);
                start_r = start_r.offset(1);
            }
        }

        if start_l == end_l {
            // All out-of-order elements in the left block were moved. Move to the next block.
            l = unsafe { l.offset(block_l as isize) };
        }

        if start_r == end_r {
            // All out-of-order elements in the right block were moved. Move to the previous block.
            r = unsafe { r.offset(-(block_r as isize)) };
        }

        if is_done {
            break;
        }
    }

    // All that remains now is at most one block (either the left or the right) with out-of-order
    // elements that need to be moved. Such remaining elements can be simply shifted to the end
    // within their block.

    if start_l < end_l {
        // The left block remains.
        // Move its remaining out-of-order elements to the far right.
        debug_assert_eq!(width(l, r), block_l);
        while start_l < end_l {
            unsafe {
                end_l = end_l.offset(-1);
                ptr::swap(l.offset(*end_l as isize), r.offset(-1));
                r = r.offset(-1);
            }
        }
        width(v.as_mut_ptr(), r)
    } else if start_r < end_r {
        // The right block remains.
        // Move its remaining out-of-order elements to the far left.
        debug_assert_eq!(width(l, r), block_r);
        while start_r < end_r {
            unsafe {
                end_r = end_r.offset(-1);
                ptr::swap(l, r.offset(-(*end_r as isize) - 1));
                l = l.offset(1);
            }
        }
        width(v.as_mut_ptr(), l)
    } else {
        // Nothing else to do, we're done.
        width(v.as_mut_ptr(), l)
    }
}
+
/// Partitions `v` into elements smaller than `v[pivot]`, followed by elements greater than or
/// equal to `v[pivot]`.
///
/// Returns a tuple of:
///
/// 1. Number of elements smaller than `v[pivot]`.
/// 2. True if `v` was already partitioned.
fn partition<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool)
where
    F: FnMut(&T, &T) -> bool,
{
    let (mid, was_partitioned) = {
        // Place the pivot at the beginning of slice.
        v.swap(0, pivot);
        let (pivot, v) = v.split_at_mut(1);
        let pivot = &mut pivot[0];

        // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
        // operation panics, the pivot will be automatically written back into the slice.
        let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
        let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
        let pivot = &*tmp;

        // Find the first pair of out-of-order elements.
        let mut l = 0;
        let mut r = v.len();

        // SAFETY: The unsafety below involves indexing an array.
        // For the first one: We already do the bounds checking here with `l < r`.
        // For the second one: We initially have `l == 0` and `r == v.len()` and we checked that `l < r` at every indexing operation.
        // From here we know that `r` must be at least `r == l` which was shown to be valid from the first one.
        unsafe {
            // Find the first element greater than or equal to the pivot.
            while l < r && is_less(v.get_unchecked(l), pivot) {
                l += 1;
            }

            // Find the last element smaller than the pivot.
            while l < r && !is_less(v.get_unchecked(r - 1), pivot) {
                r -= 1;
            }
        }

        // The first `l` elements are already known to be smaller than the pivot; the unscanned
        // middle `v[l..r]` is handled block-wise. `l >= r` means the scans met, i.e. the slice
        // was already partitioned.
        (l + partition_in_blocks(&mut v[l..r], pivot, is_less), l >= r)

        // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated
        // variable) back into the slice where it originally was. This step is critical in ensuring
        // safety!
    };

    // Place the pivot between the two partitions.
    v.swap(0, mid);

    (mid, was_partitioned)
}
+
/// Partitions `v` into elements equal to `v[pivot]` followed by elements greater than `v[pivot]`.
///
/// Returns the number of elements equal to the pivot. It is assumed that `v` does not contain
/// elements smaller than the pivot.
fn partition_equal<T, F>(v: &mut [T], pivot: usize, is_less: &mut F) -> usize
where
    F: FnMut(&T, &T) -> bool,
{
    // Place the pivot at the beginning of slice.
    v.swap(0, pivot);
    let (pivot, v) = v.split_at_mut(1);
    let pivot = &mut pivot[0];

    // Read the pivot into a stack-allocated variable for efficiency. If a following comparison
    // operation panics, the pivot will be automatically written back into the slice.
    // SAFETY: The pointer here is valid because it is obtained from a reference to a slice.
    let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) });
    let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot };
    let pivot = &*tmp;

    // Now partition the slice.
    let mut l = 0;
    let mut r = v.len();
    loop {
        // SAFETY: The unsafety below involves indexing an array.
        // For the first one: We already do the bounds checking here with `l < r`.
        // For the second one: We initially have `l == 0` and `r == v.len()` and we checked that `l < r` at every indexing operation.
        // From here we know that `r` must be at least `r == l` which was shown to be valid from the first one.
        unsafe {
            // Find the first element greater than the pivot
            // (the scan continues while `v[l] <= pivot`, i.e. while `!is_less(pivot, v[l])`).
            while l < r && !is_less(pivot, v.get_unchecked(l)) {
                l += 1;
            }

            // Find the last element equal to the pivot
            // (since `v` holds no elements smaller than the pivot, `!(pivot < x)` means `x == pivot`).
            while l < r && is_less(pivot, v.get_unchecked(r - 1)) {
                r -= 1;
            }

            // Are we done?
            if l >= r {
                break;
            }

            // Swap the found pair of out-of-order elements.
            r -= 1;
            ptr::swap(v.get_unchecked_mut(l), v.get_unchecked_mut(r));
            l += 1;
        }
    }

    // We found `l` elements equal to the pivot. Add 1 to account for the pivot itself.
    l + 1

    // `_pivot_guard` goes out of scope and writes the pivot (which is a stack-allocated variable)
    // back into the slice where it originally was. This step is critical in ensuring safety!
}
+
/// Scatters some elements around in an attempt to break patterns that might cause imbalanced
/// partitions in quicksort.
#[cold]
fn break_patterns<T>(v: &mut [T]) {
    let len = v.len();

    // Slices shorter than 8 elements are left untouched.
    if len < 8 {
        return;
    }

    // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia,
    // seeded with the slice length so the shuffle is deterministic for a given length.
    let mut state = len as u32;
    let mut next_u32 = || {
        state ^= state << 13;
        state ^= state >> 17;
        state ^= state << 5;
        state
    };
    let mut next_usize = || {
        if usize::BITS > 32 {
            // Combine two 32-bit outputs into a 64-bit value, high word first.
            (((next_u32() as u64) << 32) | (next_u32() as u64)) as usize
        } else {
            next_u32() as usize
        }
    };

    // Random numbers are reduced modulo this power of two via masking.
    // The mask fits into `usize` because `len` is not greater than `isize::MAX`.
    let mask = len.next_power_of_two() - 1;

    // Pivot candidates are picked near this index, so randomize its neighborhood.
    let pos = len / 4 * 2;

    for off in 0..3 {
        // Reduce a random number modulo `len` cheaply: masking leaves a value
        // below `2 * len`, so a single conditional subtraction finishes the job.
        let mut other = next_usize() & mask;
        if other >= len {
            other -= len;
        }

        v.swap(pos - 1 + off, other);
    }
}
+
/// Chooses a pivot in `v` and returns the index and `true` if the slice is likely already sorted.
///
/// Elements in `v` might be reordered in the process.
fn choose_pivot<T, F>(v: &mut [T], is_less: &mut F) -> (usize, bool)
where
    F: FnMut(&T, &T) -> bool,
{
    // Minimum length to choose the median-of-medians method.
    // Shorter slices use the simple median-of-three method.
    const SHORTEST_MEDIAN_OF_MEDIANS: usize = 50;
    // Maximum number of swaps that can be performed in this function:
    // at most 4 calls to `sort3`, each performing at most 3 swaps.
    const MAX_SWAPS: usize = 4 * 3;

    let len = v.len();

    // Three indices near which we are going to choose a pivot.
    // These are the approximate quartile positions of the slice.
    let mut a = len / 4 * 1;
    let mut b = len / 4 * 2;
    let mut c = len / 4 * 3;

    // Counts the total number of swaps we are about to perform while sorting indices.
    let mut swaps = 0;

    if len >= 8 {
        // Swaps indices so that `v[a] <= v[b]`.
        // The indices stay within bounds: they start as quartile positions (with
        // `len >= 8`), and `sort_adjacent` only shifts them by 1 when `len >= 50`,
        // which keeps `a - 1` and `c + 1` inside `0..len`.
        let mut sort2 = |a: &mut usize, b: &mut usize| unsafe {
            if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) {
                ptr::swap(a, b);
                swaps += 1;
            }
        };

        // Swaps indices so that `v[a] <= v[b] <= v[c]`.
        let mut sort3 = |a: &mut usize, b: &mut usize, c: &mut usize| {
            sort2(a, b);
            sort2(b, c);
            sort2(a, b);
        };

        if len >= SHORTEST_MEDIAN_OF_MEDIANS {
            // Finds the median of `v[a - 1], v[a], v[a + 1]` and stores the index into `a`.
            let mut sort_adjacent = |a: &mut usize| {
                let tmp = *a;
                sort3(&mut (tmp - 1), a, &mut (tmp + 1));
            };

            // Find medians in the neighborhoods of `a`, `b`, and `c`.
            sort_adjacent(&mut a);
            sort_adjacent(&mut b);
            sort_adjacent(&mut c);
        }

        // Find the median among `a`, `b`, and `c`.
        sort3(&mut a, &mut b, &mut c);
    }

    if swaps < MAX_SWAPS {
        // Zero swaps means every sampled pair was already in order — a strong
        // hint that the whole slice is sorted.
        (b, swaps == 0)
    } else {
        // The maximum number of swaps was performed. Chances are the slice is descending or mostly
        // descending, so reversing will probably help sort it faster.
        // After reversal the element previously at index `b` sits at `len - 1 - b`.
        v.reverse();
        (len - 1 - b, true)
    }
}
+
/// Sorts `v` recursively.
///
/// If the slice had a predecessor in the original array, it is specified as `pred`.
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
where
    F: FnMut(&T, &T) -> bool,
{
    // Slices of up to this length get sorted using insertion sort.
    const MAX_INSERTION: usize = 20;

    // True if the last partitioning was reasonably balanced.
    let mut was_balanced = true;
    // True if the last partitioning didn't shuffle elements (the slice was already partitioned).
    let mut was_partitioned = true;

    loop {
        let len = v.len();

        // Very short slices get sorted using insertion sort.
        if len <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }

        // If too many bad pivot choices were made, simply fall back to heapsort in order to
        // guarantee `O(n * log(n))` worst-case.
        if limit == 0 {
            heapsort(v, is_less);
            return;
        }

        // If the last partitioning was imbalanced, try breaking patterns in the slice by shuffling
        // some elements around. Hopefully we'll choose a better pivot this time.
        if !was_balanced {
            break_patterns(v);
            limit -= 1;
        }

        // Choose a pivot and try guessing whether the slice is already sorted.
        let (pivot, likely_sorted) = choose_pivot(v, is_less);

        // If the last partitioning was decently balanced and didn't shuffle elements, and if pivot
        // selection predicts the slice is likely already sorted...
        if was_balanced && was_partitioned && likely_sorted {
            // Try identifying several out-of-order elements and shifting them to correct
            // positions. If the slice ends up being completely sorted, we're done.
            if partial_insertion_sort(v, is_less) {
                return;
            }
        }

        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        // (Since `pred` precedes every element of `v`, `!is_less(p, pivot)` implies equality.)
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // Continue sorting elements greater than the pivot.
                // (`{ v }` moves the mutable borrow so the reslice satisfies the borrow checker.)
                v = &mut { v }[mid..];
                continue;
            }
        }

        // Partition the slice.
        let (mid, was_p) = partition(v, pivot, is_less);
        // A partition counts as balanced when its shorter side holds at least 1/8 of the slice.
        was_balanced = cmp::min(mid, len - mid) >= len / 8;
        was_partitioned = was_p;

        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = { v }.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];

        // Recurse into the shorter side only in order to minimize the total number of recursive
        // calls and consume less stack space. Then just continue with the longer side (this is
        // akin to tail recursion).
        if left.len() < right.len() {
            recurse(left, is_less, pred, limit);
            v = right;
            pred = Some(pivot);
        } else {
            recurse(right, is_less, Some(pivot), limit);
            v = left;
        }
    }
}
+
+/// Sorts `v` using pattern-defeating quicksort, which is *O*(*n* \* log(*n*)) worst-case.
+pub fn quicksort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Sorting has no meaningful behavior on zero-sized types.
+ if mem::size_of::<T>() == 0 {
+ return;
+ }
+
+ // Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
+ let limit = usize::BITS - v.len().leading_zeros();
+
+ recurse(v, &mut is_less, None, limit);
+}
+
/// Helper for `partition_at_index`: repeatedly partitions `v`, narrowing in on the
/// subslice containing `index` until that position holds its final sorted value.
///
/// `pred` is the element immediately preceding `v` in the original slice, if any; it is
/// used to detect runs of elements equal to the smallest remaining value.
fn partition_at_index_loop<'a, T, F>(
    mut v: &'a mut [T],
    mut index: usize,
    is_less: &mut F,
    mut pred: Option<&'a T>,
) where
    F: FnMut(&T, &T) -> bool,
{
    loop {
        // For slices of up to this length it's probably faster to simply sort them.
        const MAX_INSERTION: usize = 10;
        if v.len() <= MAX_INSERTION {
            insertion_sort(v, is_less);
            return;
        }

        // Choose a pivot
        let (pivot, _) = choose_pivot(v, is_less);

        // If the chosen pivot is equal to the predecessor, then it's the smallest element in the
        // slice. Partition the slice into elements equal to and elements greater than the pivot.
        // This case is usually hit when the slice contains many duplicate elements.
        if let Some(p) = pred {
            if !is_less(p, &v[pivot]) {
                let mid = partition_equal(v, pivot, is_less);

                // If we've passed our index, then we're good.
                if mid > index {
                    return;
                }

                // Otherwise, continue sorting elements greater than the pivot.
                v = &mut v[mid..];
                index = index - mid;
                // Everything remaining is strictly greater than the old predecessor,
                // so the check above can never trigger again; drop it to skip the comparison.
                pred = None;
                continue;
            }
        }

        let (mid, _) = partition(v, pivot, is_less);

        // Split the slice into `left`, `pivot`, and `right`.
        let (left, right) = { v }.split_at_mut(mid);
        let (pivot, right) = right.split_at_mut(1);
        let pivot = &pivot[0];

        if mid < index {
            // Target lies in the right partition; the pivot becomes its predecessor.
            v = right;
            index = index - mid - 1;
            pred = Some(pivot);
        } else if mid > index {
            // Target lies in the left partition.
            v = left;
        } else {
            // If mid == index, then we're done, since partition() guaranteed that all elements
            // after mid are greater than or equal to mid.
            return;
        }
    }
}
+
/// Reorders `v` so that the element at `index` is at its final sorted position, and
/// returns `(&mut v[..index], &mut v[index], &mut v[index + 1..])`.
///
/// # Panics
///
/// Panics if `index` is out of bounds (`index >= v.len()`).
pub fn partition_at_index<T, F>(
    v: &mut [T],
    index: usize,
    mut is_less: F,
) -> (&mut [T], &mut T, &mut [T])
where
    F: FnMut(&T, &T) -> bool,
{
    use cmp::Ordering::Greater;
    use cmp::Ordering::Less;

    if index >= v.len() {
        // NOTE(review): the message says "greater than" but this also fires when
        // `index == v.len()`; the wording could be tightened.
        panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
    }

    if mem::size_of::<T>() == 0 {
        // Sorting has no meaningful behavior on zero-sized types. Do nothing.
    } else if index == v.len() - 1 {
        // Find max element and place it in the last position of the array. We're free to use
        // `unwrap()` here because we know v must not be empty.
        // The comparator never reports `Equal`, which fixes how ties are resolved.
        let (max_index, _) = v
            .iter()
            .enumerate()
            .max_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
            .unwrap();
        v.swap(max_index, index);
    } else if index == 0 {
        // Find min element and place it in the first position of the array. We're free to use
        // `unwrap()` here because we know v must not be empty.
        let (min_index, _) = v
            .iter()
            .enumerate()
            .min_by(|&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater })
            .unwrap();
        v.swap(min_index, index);
    } else {
        // General case: quickselect-style loop.
        partition_at_index_loop(v, index, &mut is_less, None);
    }

    // Split around the (now final) element at `index`.
    let (left, right) = v.split_at_mut(index);
    let (pivot, right) = right.split_at_mut(1);
    let pivot = &mut pivot[0];
    (left, pivot, right)
}
--- /dev/null
+//! Ways to create a `str` from bytes slice.
+
+use crate::mem;
+
+use super::validations::run_utf8_validation;
+use super::Utf8Error;
+
+/// Converts a slice of bytes to a string slice.
+///
+/// A string slice ([`&str`]) is made of bytes ([`u8`]), and a byte slice
+/// ([`&[u8]`][byteslice]) is made of bytes, so this function converts between
+/// the two. Not all byte slices are valid string slices, however: [`&str`] requires
+/// that it is valid UTF-8. `from_utf8()` checks to ensure that the bytes are valid
+/// UTF-8, and then does the conversion.
+///
+/// [`&str`]: str
+/// [byteslice]: ../../std/primitive.slice.html
+///
+/// If you are sure that the byte slice is valid UTF-8, and you don't want to
+/// incur the overhead of the validity check, there is an unsafe version of
+/// this function, [`from_utf8_unchecked`], which has the same
+/// behavior but skips the check.
+///
+/// If you need a `String` instead of a `&str`, consider
+/// [`String::from_utf8`][string].
+///
+/// [string]: ../../std/string/struct.String.html#method.from_utf8
+///
+/// Because you can stack-allocate a `[u8; N]`, and you can take a
+/// [`&[u8]`][byteslice] of it, this function is one way to have a
+/// stack-allocated string. There is an example of this in the
+/// examples section below.
+///
+/// [byteslice]: ../../std/primitive.slice.html
+///
+/// # Errors
+///
+/// Returns `Err` if the slice is not UTF-8 with a description as to why the
+/// provided slice is not UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so just use `unwrap()`.
+/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+///
+/// Incorrect bytes:
+///
+/// ```
+/// use std::str;
+///
+/// // some invalid bytes, in a vector
+/// let sparkle_heart = vec![0, 159, 146, 150];
+///
+/// assert!(str::from_utf8(&sparkle_heart).is_err());
+/// ```
+///
+/// See the docs for [`Utf8Error`] for more details on the kinds of
+/// errors that can be returned.
+///
+/// A "stack allocated string":
+///
+/// ```
+/// use std::str;
+///
+/// // some bytes, in a stack-allocated array
+/// let sparkle_heart = [240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so just use `unwrap()`.
+/// let sparkle_heart = str::from_utf8(&sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
+ run_utf8_validation(v)?;
+ // SAFETY: Just ran validation.
+ Ok(unsafe { from_utf8_unchecked(v) })
+}
+
+/// Converts a mutable slice of bytes to a mutable string slice.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::str;
+///
+/// // "Hello, Rust!" as a mutable vector
+/// let mut hellorust = vec![72, 101, 108, 108, 111, 44, 32, 82, 117, 115, 116, 33];
+///
+/// // As we know these bytes are valid, we can use `unwrap()`
+/// let outstr = str::from_utf8_mut(&mut hellorust).unwrap();
+///
+/// assert_eq!("Hello, Rust!", outstr);
+/// ```
+///
+/// Incorrect bytes:
+///
+/// ```
+/// use std::str;
+///
+/// // Some invalid bytes in a mutable vector
+/// let mut invalid = vec![128, 223];
+///
+/// assert!(str::from_utf8_mut(&mut invalid).is_err());
+/// ```
+/// See the docs for [`Utf8Error`] for more details on the kinds of
+/// errors that can be returned.
+#[stable(feature = "str_mut_extras", since = "1.20.0")]
+pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
+ run_utf8_validation(v)?;
+ // SAFETY: Just ran validation.
+ Ok(unsafe { from_utf8_unchecked_mut(v) })
+}
+
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8.
///
/// See the safe version, [`from_utf8`], for more information.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed to
/// it are valid UTF-8. If this constraint is violated, undefined behavior
/// results, as the rest of Rust assumes that [`&str`]s are valid UTF-8.
///
/// [`&str`]: str
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// // some bytes, in a vector
/// let sparkle_heart = vec![240, 159, 146, 150];
///
/// let sparkle_heart = unsafe {
///     str::from_utf8_unchecked(&sparkle_heart)
/// };
///
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_str_from_utf8_unchecked", issue = "75196")]
#[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
#[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
    // SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
    // Also relies on `&str` and `&[u8]` having the same layout.
    // `transmute` (rather than a pointer cast) keeps this usable as a `const fn`
    // under the `const_fn_transmute` gate referenced in the attributes above.
    unsafe { mem::transmute(v) }
}
+
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8; mutable version.
///
/// See the immutable version, [`from_utf8_unchecked()`], for more information.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed to
/// it are valid UTF-8. If this constraint is violated, undefined behavior
/// results, as the rest of Rust assumes that [`&str`]s are valid UTF-8.
///
/// [`&str`]: str
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::str;
///
/// let mut heart = vec![240, 159, 146, 150];
/// let heart = unsafe { str::from_utf8_unchecked_mut(&mut heart) };
///
/// assert_eq!("💖", heart);
/// ```
#[inline]
#[stable(feature = "str_mut_extras", since = "1.20.0")]
pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
    // SAFETY: the caller must guarantee that the bytes `v`
    // are valid UTF-8, thus the cast to `*mut str` is safe.
    // Also, the pointer dereference is safe because that pointer
    // comes from a reference which is guaranteed to be valid for writes.
    unsafe { &mut *(v as *mut [u8] as *mut str) }
}
--- /dev/null
+//! Defines utf8 error type.
+
+use crate::fmt;
+
/// Errors which can occur when attempting to interpret a sequence of [`u8`]
/// as a string.
///
/// As such, the `from_utf8` family of functions and methods for both [`String`]s
/// and [`&str`]s make use of this error, for example.
///
/// [`String`]: ../../std/string/struct.String.html#method.from_utf8
/// [`&str`]: super::from_utf8
///
/// # Examples
///
/// This error type’s methods can be used to create functionality
/// similar to `String::from_utf8_lossy` without allocating heap memory:
///
/// ```
/// fn from_utf8_lossy<F>(mut input: &[u8], mut push: F) where F: FnMut(&str) {
///     loop {
///         match std::str::from_utf8(input) {
///             Ok(valid) => {
///                 push(valid);
///                 break
///             }
///             Err(error) => {
///                 let (valid, after_valid) = input.split_at(error.valid_up_to());
///                 unsafe {
///                     push(std::str::from_utf8_unchecked(valid))
///                 }
///                 push("\u{FFFD}");
///
///                 if let Some(invalid_sequence_length) = error.error_len() {
///                     input = &after_valid[invalid_sequence_length..]
///                 } else {
///                     break
///                 }
///             }
///         }
///     }
/// }
/// ```
#[derive(Copy, Eq, PartialEq, Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Utf8Error {
    // Byte index in the input up to which valid UTF-8 was verified
    // (exposed via `valid_up_to`).
    pub(super) valid_up_to: usize,
    // `None` if the input ended unexpectedly mid-sequence; `Some(len)` with the
    // length of the invalid byte sequence otherwise (exposed via `error_len`).
    // Stored as `u8` and widened to `usize` by the accessor.
    pub(super) error_len: Option<u8>,
}
+
impl Utf8Error {
    /// Returns the index in the given string up to which valid UTF-8 was
    /// verified.
    ///
    /// It is the maximum index such that `from_utf8(&input[..index])`
    /// would return `Ok(_)`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::str;
    ///
    /// // some invalid bytes, in a vector
    /// let sparkle_heart = vec![0, 159, 146, 150];
    ///
    /// // std::str::from_utf8 returns a Utf8Error
    /// let error = str::from_utf8(&sparkle_heart).unwrap_err();
    ///
    /// // the second byte is invalid here
    /// assert_eq!(1, error.valid_up_to());
    /// ```
    #[stable(feature = "utf8_error", since = "1.5.0")]
    #[inline]
    pub fn valid_up_to(&self) -> usize {
        self.valid_up_to
    }

    /// Provides more information about the failure:
    ///
    /// * `None`: the end of the input was reached unexpectedly.
    ///   `self.valid_up_to()` is 1 to 3 bytes from the end of the input.
    ///   If a byte stream (such as a file or a network socket) is being decoded incrementally,
    ///   this could be a valid `char` whose UTF-8 byte sequence is spanning multiple chunks.
    ///
    /// * `Some(len)`: an unexpected byte was encountered.
    ///   The length provided is that of the invalid byte sequence
    ///   that starts at the index given by `valid_up_to()`.
    ///   Decoding should resume after that sequence
    ///   (after inserting a [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]) in case of
    ///   lossy decoding.
    ///
    /// [U+FFFD]: ../../std/char/constant.REPLACEMENT_CHARACTER.html
    #[stable(feature = "utf8_error_error_len", since = "1.20.0")]
    #[inline]
    pub fn error_len(&self) -> Option<usize> {
        // Widen the compactly stored `u8` length to the conventional `usize`.
        self.error_len.map(|len| len as usize)
    }
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for Utf8Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let Some(error_len) = self.error_len {
+ write!(
+ f,
+ "invalid utf-8 sequence of {} bytes from index {}",
+ error_len, self.valid_up_to
+ )
+ } else {
+ write!(f, "incomplete utf-8 byte sequence from index {}", self.valid_up_to)
+ }
+ }
+}
+
/// An error returned when parsing a `bool` using [`from_str`] fails
///
/// [`from_str`]: super::FromStr::from_str
#[derive(Debug, Clone, PartialEq, Eq)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ParseBoolError {
    // Private unit field: prevents construction of this error outside the
    // parent module.
    pub(super) _priv: (),
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for ParseBoolError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ "provided string was not `true` or `false`".fmt(f)
+ }
+}
--- /dev/null
+//! Iterators for `str` methods.
+
+use crate::char;
+use crate::fmt::{self, Write};
+use crate::iter::TrustedRandomAccess;
+use crate::iter::{Chain, FlatMap, Flatten};
+use crate::iter::{Copied, Filter, FusedIterator, Map, TrustedLen};
+use crate::ops::Try;
+use crate::option;
+use crate::slice::{self, Split as SliceSplit};
+
+use super::from_utf8_unchecked;
+use super::pattern::Pattern;
+use super::pattern::{DoubleEndedSearcher, ReverseSearcher, Searcher};
+use super::validations::{next_code_point, next_code_point_reverse, utf8_is_cont_byte};
+use super::LinesAnyMap;
+use super::{BytesIsNotEmpty, UnsafeBytesToStr};
+use super::{CharEscapeDebugContinue, CharEscapeDefault, CharEscapeUnicode};
+use super::{IsAsciiWhitespace, IsNotEmpty, IsWhitespace};
+
/// An iterator over the [`char`]s of a string slice.
///
/// This struct is created by the [`chars`] method on [`str`].
/// See its documentation for more.
///
/// [`char`]: prim@char
/// [`chars`]: str::chars
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chars<'a> {
    // Iterator over the remaining bytes of the string slice. Because it is
    // always constructed from a `str`, the remaining bytes are valid UTF-8
    // and the cursor sits on a character boundary.
    pub(super) iter: slice::Iter<'a, u8>,
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for Chars<'a> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ next_code_point(&mut self.iter).map(|ch| {
+ // SAFETY: `str` invariant says `ch` is a valid Unicode Scalar Value.
+ unsafe { char::from_u32_unchecked(ch) }
+ })
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ // length in `char` is equal to the number of non-continuation bytes
+ let bytes_len = self.iter.len();
+ let mut cont_bytes = 0;
+ for &byte in self.iter {
+ cont_bytes += utf8_is_cont_byte(byte) as usize;
+ }
+ bytes_len - cont_bytes
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.iter.len();
+ // `(len + 3)` can't overflow, because we know that the `slice::Iter`
+ // belongs to a slice in memory which has a maximum length of
+ // `isize::MAX` (that's well below `usize::MAX`).
+ ((len + 3) / 4, Some(len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<char> {
+ // No need to go through the entire string.
+ self.next_back()
+ }
+}
+
+#[stable(feature = "chars_debug_impl", since = "1.38.0")]
+impl fmt::Debug for Chars<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Chars(")?;
+ f.debug_list().entries(self.clone()).finish()?;
+ write!(f, ")")?;
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for Chars<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ next_code_point_reverse(&mut self.iter).map(|ch| {
+ // SAFETY: `str` invariant says `ch` is a valid Unicode Scalar Value.
+ unsafe { char::from_u32_unchecked(ch) }
+ })
+ }
+}
+
#[stable(feature = "fused", since = "1.26.0")]
// `Chars` keeps returning `None` after exhaustion because decoding draws from
// the underlying `slice::Iter` — NOTE(review): relies on `next_code_point`
// returning `None` for an empty iterator; confirm in `validations`.
impl FusedIterator for Chars<'_> {}
+
impl<'a> Chars<'a> {
    /// Views the underlying data as a subslice of the original data.
    ///
    /// This has the same lifetime as the original slice, and so the
    /// iterator can continue to be used while this exists.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut chars = "abc".chars();
    ///
    /// assert_eq!(chars.as_str(), "abc");
    /// chars.next();
    /// assert_eq!(chars.as_str(), "bc");
    /// chars.next();
    /// chars.next();
    /// assert_eq!(chars.as_str(), "");
    /// ```
    #[stable(feature = "iter_to_slice", since = "1.4.0")]
    #[inline]
    pub fn as_str(&self) -> &'a str {
        // The returned `&str` borrows from the original string (`'a`), not from `self`.
        // SAFETY: `Chars` is only made from a str, which guarantees the iter is valid UTF-8.
        unsafe { from_utf8_unchecked(self.iter.as_slice()) }
    }
}
+
/// An iterator over the [`char`]s of a string slice, and their positions.
///
/// This struct is created by the [`char_indices`] method on [`str`].
/// See its documentation for more.
///
/// [`char`]: prim@char
/// [`char_indices`]: str::char_indices
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct CharIndices<'a> {
    // Byte offset of `iter`'s remaining data from the start of the original
    // string; used to report each char's byte index.
    pub(super) front_offset: usize,
    // Char iterator over the remaining data.
    pub(super) iter: Chars<'a>,
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Iterator for CharIndices<'a> {
+ type Item = (usize, char);
+
+ #[inline]
+ fn next(&mut self) -> Option<(usize, char)> {
+ let pre_len = self.iter.iter.len();
+ match self.iter.next() {
+ None => None,
+ Some(ch) => {
+ let index = self.front_offset;
+ let len = self.iter.iter.len();
+ self.front_offset += pre_len - len;
+ Some((index, ch))
+ }
+ }
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.iter.count()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<(usize, char)> {
+ // No need to go through the entire string.
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> DoubleEndedIterator for CharIndices<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<(usize, char)> {
+ self.iter.next_back().map(|ch| {
+ let index = self.front_offset + self.iter.iter.len();
+ (index, ch)
+ })
+ }
+}
+
#[stable(feature = "fused", since = "1.26.0")]
// Forwards the exhaustion behavior of the wrapped `Chars`, which is itself fused.
impl FusedIterator for CharIndices<'_> {}
+
impl<'a> CharIndices<'a> {
    /// Views the underlying data as a subslice of the original data.
    ///
    /// This has the same lifetime as the original slice, and so the
    /// iterator can continue to be used while this exists.
    #[stable(feature = "iter_to_slice", since = "1.4.0")]
    #[inline]
    pub fn as_str(&self) -> &'a str {
        // Delegates to `Chars::as_str`; the offset is irrelevant for the view.
        self.iter.as_str()
    }
}
+
/// An iterator over the bytes of a string slice.
///
/// This struct is created by the [`bytes`] method on [`str`].
/// See its documentation for more.
///
/// [`bytes`]: str::bytes
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone, Debug)]
// Newtype over a `Copied` adaptor of the byte-slice iterator; the trait impls
// below forward to it.
pub struct Bytes<'a>(pub(super) Copied<slice::Iter<'a, u8>>);
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Iterator for Bytes<'_> {
    type Item = u8;

    // Each method below explicitly forwards to the wrapped
    // `Copied<slice::Iter<'a, u8>>` rather than relying on the default trait
    // implementations, so the slice iterator's versions are used.

    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.0.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }

    #[inline]
    fn count(self) -> usize {
        self.0.count()
    }

    #[inline]
    fn last(self) -> Option<Self::Item> {
        self.0.last()
    }

    #[inline]
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        self.0.nth(n)
    }

    #[inline]
    fn all<F>(&mut self, f: F) -> bool
    where
        F: FnMut(Self::Item) -> bool,
    {
        self.0.all(f)
    }

    #[inline]
    fn any<F>(&mut self, f: F) -> bool
    where
        F: FnMut(Self::Item) -> bool,
    {
        self.0.any(f)
    }

    #[inline]
    fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        P: FnMut(&Self::Item) -> bool,
    {
        self.0.find(predicate)
    }

    #[inline]
    fn position<P>(&mut self, predicate: P) -> Option<usize>
    where
        P: FnMut(Self::Item) -> bool,
    {
        self.0.position(predicate)
    }

    #[inline]
    fn rposition<P>(&mut self, predicate: P) -> Option<usize>
    where
        P: FnMut(Self::Item) -> bool,
    {
        self.0.rposition(predicate)
    }

    #[inline]
    unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> u8 {
        // SAFETY: the caller must uphold the safety contract
        // for `Iterator::__iterator_get_unchecked`.
        unsafe { self.0.__iterator_get_unchecked(idx) }
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl DoubleEndedIterator for Bytes<'_> {
    // Forwarded to the inner `Copied<slice::Iter>` for the same reason as the
    // `Iterator` methods: to use the slice iterator's implementations.

    #[inline]
    fn next_back(&mut self) -> Option<u8> {
        self.0.next_back()
    }

    #[inline]
    fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
        self.0.nth_back(n)
    }

    #[inline]
    fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
    where
        P: FnMut(&Self::Item) -> bool,
    {
        self.0.rfind(predicate)
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl ExactSizeIterator for Bytes<'_> {
    // The inner slice iterator knows its exact remaining length.

    #[inline]
    fn len(&self) -> usize {
        self.0.len()
    }

    #[inline]
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
+
#[stable(feature = "fused", since = "1.26.0")]
impl FusedIterator for Bytes<'_> {}

#[unstable(feature = "trusted_len", issue = "37572")]
// SAFETY: `size_hint`/`len` are forwarded to the inner slice iterator, which
// reports its exact remaining length.
unsafe impl TrustedLen for Bytes<'_> {}

#[doc(hidden)]
#[unstable(feature = "trusted_random_access", issue = "none")]
unsafe impl TrustedRandomAccess for Bytes<'_> {
    #[inline]
    fn may_have_side_effect() -> bool {
        // Reading a byte out of the underlying shared slice has no side effects.
        false
    }
}
+
/// This macro generates a Clone impl for string pattern API
/// wrapper types of the form X<'a, P>
///
/// The caller supplies the clone body as an expression `$e` written in terms
/// of `$s`, which the generated impl binds to `self`.
macro_rules! derive_pattern_clone {
    (clone $t:ident with |$s:ident| $e:expr) => {
        impl<'a, P> Clone for $t<'a, P>
        where
            P: Pattern<'a, Searcher: Clone>,
        {
            fn clone(&self) -> Self {
                // Bind the macro-supplied name to `self` so `$e` can refer to it.
                let $s = self;
                $e
            }
        }
    };
}
+
+/// This macro generates two public iterator structs
+/// wrapping a private internal one that makes use of the `Pattern` API.
+///
+/// For all patterns `P: Pattern<'a>` the following items will be
+/// generated (generics omitted):
+///
+/// struct $forward_iterator($internal_iterator);
+/// struct $reverse_iterator($internal_iterator);
+///
+/// impl Iterator for $forward_iterator
+/// { /* internal ends up calling Searcher::next_match() */ }
+///
+/// impl DoubleEndedIterator for $forward_iterator
+/// where P::Searcher: DoubleEndedSearcher
+/// { /* internal ends up calling Searcher::next_match_back() */ }
+///
+/// impl Iterator for $reverse_iterator
+/// where P::Searcher: ReverseSearcher
+/// { /* internal ends up calling Searcher::next_match_back() */ }
+///
+/// impl DoubleEndedIterator for $reverse_iterator
+/// where P::Searcher: DoubleEndedSearcher
+/// { /* internal ends up calling Searcher::next_match() */ }
+///
+/// The internal one is defined outside the macro, and has almost the same
+/// semantic as a DoubleEndedIterator by delegating to `pattern::Searcher` and
+/// `pattern::ReverseSearcher` for both forward and reverse iteration.
+///
+/// "Almost", because a `Searcher` and a `ReverseSearcher` for a given
+/// `Pattern` might not return the same elements, so actually implementing
+/// `DoubleEndedIterator` for it would be incorrect.
+/// (See the docs in `str::pattern` for more details)
+///
+/// However, the internal struct still represents a single ended iterator from
+/// either end, and depending on pattern is also a valid double ended iterator,
+/// so the two wrapper structs implement `Iterator`
+/// and `DoubleEndedIterator` depending on the concrete pattern type, leading
+/// to the complex impls seen above.
macro_rules! generate_pattern_iterators {
    {
        // Forward iterator
        forward:
        $(#[$forward_iterator_attribute:meta])*
        struct $forward_iterator:ident;

        // Reverse iterator
        reverse:
        $(#[$reverse_iterator_attribute:meta])*
        struct $reverse_iterator:ident;

        // Stability of all generated items
        stability:
        $(#[$common_stability_attribute:meta])*

        // Internal almost-iterator that is being delegated to
        internal:
        $internal_iterator:ident yielding ($iterty:ty);

        // Kind of delegation - either single ended or double ended
        delegate $($t:tt)*
    } => {
        // The forward wrapper iterates the shared internal iterator
        // front-to-back.
        $(#[$forward_iterator_attribute])*
        $(#[$common_stability_attribute])*
        pub struct $forward_iterator<'a, P: Pattern<'a>>(pub(super) $internal_iterator<'a, P>);

        $(#[$common_stability_attribute])*
        impl<'a, P> fmt::Debug for $forward_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: fmt::Debug>,
        {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_tuple(stringify!($forward_iterator))
                    .field(&self.0)
                    .finish()
            }
        }

        $(#[$common_stability_attribute])*
        impl<'a, P: Pattern<'a>> Iterator for $forward_iterator<'a, P> {
            type Item = $iterty;

            #[inline]
            fn next(&mut self) -> Option<$iterty> {
                self.0.next()
            }
        }

        // Manual `Clone` impl so the bound can be placed on `P::Searcher`
        // rather than on `P` itself (which `#[derive(Clone)]` would require).
        $(#[$common_stability_attribute])*
        impl<'a, P> Clone for $forward_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: Clone>,
        {
            fn clone(&self) -> Self {
                $forward_iterator(self.0.clone())
            }
        }

        // The reverse wrapper iterates the same internal iterator
        // back-to-front via `next_back`.
        $(#[$reverse_iterator_attribute])*
        $(#[$common_stability_attribute])*
        pub struct $reverse_iterator<'a, P: Pattern<'a>>(pub(super) $internal_iterator<'a, P>);

        $(#[$common_stability_attribute])*
        impl<'a, P> fmt::Debug for $reverse_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: fmt::Debug>,
        {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_tuple(stringify!($reverse_iterator))
                    .field(&self.0)
                    .finish()
            }
        }

        // Note: `Iterator` for the reverse wrapper requires a
        // `ReverseSearcher`, since its forward direction is the internal
        // iterator's backward direction.
        $(#[$common_stability_attribute])*
        impl<'a, P> Iterator for $reverse_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
        {
            type Item = $iterty;

            #[inline]
            fn next(&mut self) -> Option<$iterty> {
                self.0.next_back()
            }
        }

        $(#[$common_stability_attribute])*
        impl<'a, P> Clone for $reverse_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: Clone>,
        {
            fn clone(&self) -> Self {
                $reverse_iterator(self.0.clone())
            }
        }

        #[stable(feature = "fused", since = "1.26.0")]
        impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {}

        #[stable(feature = "fused", since = "1.26.0")]
        impl<'a, P> FusedIterator for $reverse_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
        {}

        // Recurse into the `double ended;` / `single ended;` arm below to
        // optionally add the `DoubleEndedIterator` impls.
        generate_pattern_iterators!($($t)* with $(#[$common_stability_attribute])*,
                                    $forward_iterator,
                                    $reverse_iterator, $iterty);
    };
    {
        double ended; with $(#[$common_stability_attribute:meta])*,
        $forward_iterator:ident,
        $reverse_iterator:ident, $iterty:ty
    } => {
        // Double-ended delegation is only sound for patterns whose forward
        // and reverse searchers agree on the set of matches, hence the
        // `DoubleEndedSearcher` bound.
        $(#[$common_stability_attribute])*
        impl<'a, P> DoubleEndedIterator for $forward_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
        {
            #[inline]
            fn next_back(&mut self) -> Option<$iterty> {
                self.0.next_back()
            }
        }

        $(#[$common_stability_attribute])*
        impl<'a, P> DoubleEndedIterator for $reverse_iterator<'a, P>
        where
            P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
        {
            #[inline]
            fn next_back(&mut self) -> Option<$iterty> {
                self.0.next()
            }
        }
    };
    {
        single ended; with $(#[$common_stability_attribute:meta])*,
        $forward_iterator:ident,
        $reverse_iterator:ident, $iterty:ty
    } => {}
}
+
derive_pattern_clone! {
    clone SplitInternal
    with |s| SplitInternal { matcher: s.matcher.clone(), ..*s }
}

/// Core implementation shared by all the `Split*` iterators.
///
/// Tracks the not-yet-yielded region `[start, end)` of the haystack and
/// delegates match-finding to `matcher`.
pub(super) struct SplitInternal<'a, P: Pattern<'a>> {
    /// Byte offset of the start of the region not yet yielded.
    pub(super) start: usize,
    /// Byte offset of the end of the region not yet yielded.
    pub(super) end: usize,
    /// Searcher over the haystack, produced by the pattern.
    pub(super) matcher: P::Searcher,
    /// Whether a trailing empty substring may be yielded
    /// (`true` for `split`, `false` for `split_terminator`).
    pub(super) allow_trailing_empty: bool,
    /// Set once iteration has been exhausted from either end.
    pub(super) finished: bool,
}

impl<'a, P> fmt::Debug for SplitInternal<'a, P>
where
    P: Pattern<'a, Searcher: fmt::Debug>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitInternal")
            .field("start", &self.start)
            .field("end", &self.end)
            .field("matcher", &self.matcher)
            .field("allow_trailing_empty", &self.allow_trailing_empty)
            .field("finished", &self.finished)
            .finish()
    }
}
+
impl<'a, P: Pattern<'a>> SplitInternal<'a, P> {
    /// Yields the final, not-yet-returned piece `[start, end)` and marks the
    /// iterator finished. Returns `None` if already finished, or if the piece
    /// is empty and trailing empty substrings are suppressed.
    #[inline]
    fn get_end(&mut self) -> Option<&'a str> {
        if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) {
            self.finished = true;
            // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
            unsafe {
                let string = self.matcher.haystack().get_unchecked(self.start..self.end);
                Some(string)
            }
        } else {
            None
        }
    }

    /// Yields the substring before the next match; the match itself is
    /// skipped over.
    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        if self.finished {
            return None;
        }

        let haystack = self.matcher.haystack();
        match self.matcher.next_match() {
            // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries.
            Some((a, b)) => unsafe {
                let elt = haystack.get_unchecked(self.start..a);
                self.start = b;
                Some(elt)
            },
            None => self.get_end(),
        }
    }

    /// Like `next`, but the returned substring keeps the match that
    /// terminates it (used by `SplitInclusive`).
    #[inline]
    fn next_inclusive(&mut self) -> Option<&'a str> {
        if self.finished {
            return None;
        }

        let haystack = self.matcher.haystack();
        match self.matcher.next_match() {
            // SAFETY: `Searcher` guarantees that `b` lies on unicode boundary,
            // and self.start is either the start of the original string,
            // or `b` was assigned to it, so it also lies on unicode boundary.
            Some((_, b)) => unsafe {
                let elt = haystack.get_unchecked(self.start..b);
                self.start = b;
                Some(elt)
            },
            None => self.get_end(),
        }
    }

    /// Yields the substring after the last match, searching from the back.
    #[inline]
    fn next_back(&mut self) -> Option<&'a str>
    where
        P::Searcher: ReverseSearcher<'a>,
    {
        if self.finished {
            return None;
        }

        // If a trailing empty substring must be suppressed, take one element
        // with the flag enabled and drop it when it turns out empty. The
        // recursion is at most one level deep because the flag is set first.
        if !self.allow_trailing_empty {
            self.allow_trailing_empty = true;
            match self.next_back() {
                Some(elt) if !elt.is_empty() => return Some(elt),
                _ => {
                    if self.finished {
                        return None;
                    }
                }
            }
        }

        let haystack = self.matcher.haystack();
        match self.matcher.next_match_back() {
            // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries.
            Some((a, b)) => unsafe {
                let elt = haystack.get_unchecked(b..self.end);
                self.end = a;
                Some(elt)
            },
            // No more matches: everything that is left is the final element.
            // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
            None => unsafe {
                self.finished = true;
                Some(haystack.get_unchecked(self.start..self.end))
            },
        }
    }

    /// Like `next_back`, but each substring keeps its terminating match
    /// (reverse counterpart of `next_inclusive`).
    #[inline]
    fn next_back_inclusive(&mut self) -> Option<&'a str>
    where
        P::Searcher: ReverseSearcher<'a>,
    {
        if self.finished {
            return None;
        }

        // Same trailing-empty suppression dance as in `next_back`.
        if !self.allow_trailing_empty {
            self.allow_trailing_empty = true;
            match self.next_back_inclusive() {
                Some(elt) if !elt.is_empty() => return Some(elt),
                _ => {
                    if self.finished {
                        return None;
                    }
                }
            }
        }

        let haystack = self.matcher.haystack();
        match self.matcher.next_match_back() {
            // SAFETY: `Searcher` guarantees that `b` lies on unicode boundary,
            // and self.end is either the end of the original string,
            // or `b` was assigned to it, so it also lies on unicode boundary.
            Some((_, b)) => unsafe {
                let elt = haystack.get_unchecked(b..self.end);
                self.end = b;
                Some(elt)
            },
            // SAFETY: self.start is either the start of the original string,
            // or start of a substring that represents the part of the string that hasn't
            // iterated yet. Either way, it is guaranteed to lie on unicode boundary.
            // self.end is either the end of the original string,
            // or `b` was assigned to it, so it also lies on unicode boundary.
            None => unsafe {
                self.finished = true;
                Some(haystack.get_unchecked(self.start..self.end))
            },
        }
    }

    /// Returns the remainder of the haystack that has not been yielded yet.
    #[inline]
    fn as_str(&self) -> &'a str {
        // `Self::get_end` doesn't change `self.start`
        if self.finished {
            return "";
        }

        // SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
        unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) }
    }
}
+
// Generate the public `Split`/`RSplit` wrappers around `SplitInternal`.
generate_pattern_iterators! {
    forward:
    /// Created with the method [`split`].
    ///
    /// [`split`]: str::split
    struct Split;
    reverse:
    /// Created with the method [`rsplit`].
    ///
    /// [`rsplit`]: str::rsplit
    struct RSplit;
    stability:
    #[stable(feature = "rust1", since = "1.0.0")]
    internal:
    SplitInternal yielding (&'a str);
    delegate double ended;
}

impl<'a, P: Pattern<'a>> Split<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "Mary had a little lamb".split(' ');
    /// assert_eq!(split.as_str(), "Mary had a little lamb");
    /// split.next();
    /// assert_eq!(split.as_str(), "had a little lamb");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}

impl<'a, P: Pattern<'a>> RSplit<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "Mary had a little lamb".rsplit(' ');
    /// assert_eq!(split.as_str(), "Mary had a little lamb");
    /// split.next();
    /// assert_eq!(split.as_str(), "Mary had a little");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}

// Generate `SplitTerminator`/`RSplitTerminator`; these differ from
// `Split`/`RSplit` only in that the internal iterator is constructed with
// `allow_trailing_empty: false`.
generate_pattern_iterators! {
    forward:
    /// Created with the method [`split_terminator`].
    ///
    /// [`split_terminator`]: str::split_terminator
    struct SplitTerminator;
    reverse:
    /// Created with the method [`rsplit_terminator`].
    ///
    /// [`rsplit_terminator`]: str::rsplit_terminator
    struct RSplitTerminator;
    stability:
    #[stable(feature = "rust1", since = "1.0.0")]
    internal:
    SplitInternal yielding (&'a str);
    delegate double ended;
}

impl<'a, P: Pattern<'a>> SplitTerminator<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "A..B..".split_terminator('.');
    /// assert_eq!(split.as_str(), "A..B..");
    /// split.next();
    /// assert_eq!(split.as_str(), ".B..");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}

impl<'a, P: Pattern<'a>> RSplitTerminator<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "A..B..".rsplit_terminator('.');
    /// assert_eq!(split.as_str(), "A..B..");
    /// split.next();
    /// assert_eq!(split.as_str(), "A..B");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}
+
derive_pattern_clone! {
    clone SplitNInternal
    with |s| SplitNInternal { iter: s.iter.clone(), ..*s }
}

/// Core implementation shared by `SplitN` and `RSplitN`.
///
/// Wraps `SplitInternal` and yields at most `count` pieces; the final piece
/// is the unsplit remainder of the string.
pub(super) struct SplitNInternal<'a, P: Pattern<'a>> {
    pub(super) iter: SplitInternal<'a, P>,
    /// The number of splits remaining
    pub(super) count: usize,
}

impl<'a, P> fmt::Debug for SplitNInternal<'a, P>
where
    P: Pattern<'a, Searcher: fmt::Debug>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SplitNInternal")
            .field("iter", &self.iter)
            .field("count", &self.count)
            .finish()
    }
}

impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> {
    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        match self.count {
            // Budget exhausted: nothing more to yield.
            0 => None,
            // Last allowed piece: yield the whole remainder unsplit.
            1 => {
                self.count = 0;
                self.iter.get_end()
            }
            // Normal case: perform one split and decrement the budget.
            _ => {
                self.count -= 1;
                self.iter.next()
            }
        }
    }

    #[inline]
    fn next_back(&mut self) -> Option<&'a str>
    where
        P::Searcher: ReverseSearcher<'a>,
    {
        match self.count {
            0 => None,
            // Last allowed piece: yield the whole remainder unsplit.
            1 => {
                self.count = 0;
                self.iter.get_end()
            }
            _ => {
                self.count -= 1;
                self.iter.next_back()
            }
        }
    }

    /// Remainder of the string that has not been yielded yet.
    #[inline]
    fn as_str(&self) -> &'a str {
        self.iter.as_str()
    }
}
+
// Generate `SplitN`/`RSplitN`. These are single-ended: reversing a
// count-limited split from the other end would change which pieces are
// produced.
generate_pattern_iterators! {
    forward:
    /// Created with the method [`splitn`].
    ///
    /// [`splitn`]: str::splitn
    struct SplitN;
    reverse:
    /// Created with the method [`rsplitn`].
    ///
    /// [`rsplitn`]: str::rsplitn
    struct RSplitN;
    stability:
    #[stable(feature = "rust1", since = "1.0.0")]
    internal:
    SplitNInternal yielding (&'a str);
    delegate single ended;
}

impl<'a, P: Pattern<'a>> SplitN<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "Mary had a little lamb".splitn(3, ' ');
    /// assert_eq!(split.as_str(), "Mary had a little lamb");
    /// split.next();
    /// assert_eq!(split.as_str(), "had a little lamb");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}

impl<'a, P: Pattern<'a>> RSplitN<'a, P> {
    /// Returns the remainder of the string being split.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(str_split_as_str)]
    /// let mut split = "Mary had a little lamb".rsplitn(3, ' ');
    /// assert_eq!(split.as_str(), "Mary had a little lamb");
    /// split.next();
    /// assert_eq!(split.as_str(), "Mary had a little");
    /// split.by_ref().for_each(drop);
    /// assert_eq!(split.as_str(), "");
    /// ```
    #[inline]
    #[unstable(feature = "str_split_as_str", issue = "77998")]
    pub fn as_str(&self) -> &'a str {
        self.0.as_str()
    }
}
+
derive_pattern_clone! {
    clone MatchIndicesInternal
    with |s| MatchIndicesInternal(s.0.clone())
}

/// Core implementation shared by `MatchIndices` and `RMatchIndices`:
/// a thin wrapper around the pattern's searcher.
pub(super) struct MatchIndicesInternal<'a, P: Pattern<'a>>(pub(super) P::Searcher);

impl<'a, P> fmt::Debug for MatchIndicesInternal<'a, P>
where
    P: Pattern<'a, Searcher: fmt::Debug>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("MatchIndicesInternal").field(&self.0).finish()
    }
}
+
+impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> {
+ #[inline]
+ fn next(&mut self) -> Option<(usize, &'a str)> {
+ self.0
+ .next_match()
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ .map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) })
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<(usize, &'a str)>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ self.0
+ .next_match_back()
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ .map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) })
+ }
+}
+
// Generate `MatchIndices`/`RMatchIndices`, yielding `(offset, match)` pairs.
generate_pattern_iterators! {
    forward:
    /// Created with the method [`match_indices`].
    ///
    /// [`match_indices`]: str::match_indices
    struct MatchIndices;
    reverse:
    /// Created with the method [`rmatch_indices`].
    ///
    /// [`rmatch_indices`]: str::rmatch_indices
    struct RMatchIndices;
    stability:
    #[stable(feature = "str_match_indices", since = "1.5.0")]
    internal:
    MatchIndicesInternal yielding ((usize, &'a str));
    delegate double ended;
}

derive_pattern_clone! {
    clone MatchesInternal
    with |s| MatchesInternal(s.0.clone())
}

/// Core implementation shared by `Matches` and `RMatches`:
/// a thin wrapper around the pattern's searcher.
pub(super) struct MatchesInternal<'a, P: Pattern<'a>>(pub(super) P::Searcher);

impl<'a, P> fmt::Debug for MatchesInternal<'a, P>
where
    P: Pattern<'a, Searcher: fmt::Debug>,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("MatchesInternal").field(&self.0).finish()
    }
}
+
+impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ self.0.next_match().map(|(a, b)| unsafe {
+ // Indices are known to be on utf8 boundaries
+ self.0.haystack().get_unchecked(a..b)
+ })
+ }
+
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str>
+ where
+ P::Searcher: ReverseSearcher<'a>,
+ {
+ // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries.
+ self.0.next_match_back().map(|(a, b)| unsafe {
+ // Indices are known to be on utf8 boundaries
+ self.0.haystack().get_unchecked(a..b)
+ })
+ }
+}
+
// Generate `Matches`/`RMatches`, yielding the matched substrings.
generate_pattern_iterators! {
    forward:
    /// Created with the method [`matches`].
    ///
    /// [`matches`]: str::matches
    struct Matches;
    reverse:
    /// Created with the method [`rmatches`].
    ///
    /// [`rmatches`]: str::rmatches
    struct RMatches;
    stability:
    #[stable(feature = "str_matches", since = "1.2.0")]
    internal:
    MatchesInternal yielding (&'a str);
    delegate double ended;
}

/// An iterator over the lines of a string, as string slices.
///
/// This struct is created with the [`lines`] method on [`str`].
/// See its documentation for more.
///
/// [`lines`]: str::lines
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone, Debug)]
pub struct Lines<'a>(pub(super) Map<SplitTerminator<'a, char>, LinesAnyMap>);

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Lines<'a> {
    type Item = &'a str;

    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        self.0.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }

    #[inline]
    fn last(mut self) -> Option<&'a str> {
        // The last line is found faster from the back than by draining the
        // whole iterator forward.
        self.next_back()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> DoubleEndedIterator for Lines<'a> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a str> {
        self.0.next_back()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl FusedIterator for Lines<'_> {}

/// Created with the method [`lines_any`].
///
/// [`lines_any`]: str::lines_any
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.4.0", reason = "use lines()/Lines instead now")]
#[derive(Clone, Debug)]
#[allow(deprecated)]
pub struct LinesAny<'a>(pub(super) Lines<'a>);

// `LinesAny` is a deprecated alias for `Lines`; all impls simply delegate.
#[stable(feature = "rust1", since = "1.0.0")]
#[allow(deprecated)]
impl<'a> Iterator for LinesAny<'a> {
    type Item = &'a str;

    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        self.0.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
#[allow(deprecated)]
impl<'a> DoubleEndedIterator for LinesAny<'a> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a str> {
        self.0.next_back()
    }
}

#[stable(feature = "fused", since = "1.26.0")]
#[allow(deprecated)]
impl FusedIterator for LinesAny<'_> {}
+
/// An iterator over the non-whitespace substrings of a string,
/// separated by any amount of whitespace.
///
/// This struct is created by the [`split_whitespace`] method on [`str`].
/// See its documentation for more.
///
/// [`split_whitespace`]: str::split_whitespace
#[stable(feature = "split_whitespace", since = "1.1.0")]
#[derive(Clone, Debug)]
pub struct SplitWhitespace<'a> {
    // Split on whitespace chars, then drop the empty pieces that appear
    // between consecutive whitespace characters.
    pub(super) inner: Filter<Split<'a, IsWhitespace>, IsNotEmpty>,
}

/// An iterator over the non-ASCII-whitespace substrings of a string,
/// separated by any amount of ASCII whitespace.
///
/// This struct is created by the [`split_ascii_whitespace`] method on [`str`].
/// See its documentation for more.
///
/// [`split_ascii_whitespace`]: str::split_ascii_whitespace
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
#[derive(Clone, Debug)]
pub struct SplitAsciiWhitespace<'a> {
    // Operates on the byte slice (ASCII whitespace is single-byte), then
    // converts the non-empty pieces back to `&str`.
    pub(super) inner:
        Map<Filter<SliceSplit<'a, u8, IsAsciiWhitespace>, BytesIsNotEmpty>, UnsafeBytesToStr>,
}

/// An iterator over the substrings of a string,
/// terminated by a substring matching a predicate function.
/// Unlike `Split`, it contains the matched part as a terminator
/// of the subslice.
///
/// This struct is created by the [`split_inclusive`] method on [`str`].
/// See its documentation for more.
///
/// [`split_inclusive`]: str::split_inclusive
#[unstable(feature = "split_inclusive", issue = "72360")]
pub struct SplitInclusive<'a, P: Pattern<'a>>(pub(super) SplitInternal<'a, P>);
+
+#[stable(feature = "split_whitespace", since = "1.1.0")]
+impl<'a> Iterator for SplitWhitespace<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a str> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "split_whitespace", since = "1.1.0")]
+impl<'a> DoubleEndedIterator for SplitWhitespace<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for SplitWhitespace<'_> {}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl<'a> Iterator for SplitAsciiWhitespace<'a> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a str> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.inner.next_back()
+ }
+}
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+impl FusedIterator for SplitAsciiWhitespace<'_> {}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, P: Pattern<'a>> Iterator for SplitInclusive<'a, P> {
+ type Item = &'a str;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ self.0.next_inclusive()
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, P: Pattern<'a, Searcher: fmt::Debug>> fmt::Debug for SplitInclusive<'a, P> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("SplitInclusive").field("0", &self.0).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, P: Pattern<'a, Searcher: Clone>> Clone for SplitInclusive<'a, P> {
+ fn clone(&self) -> Self {
+ SplitInclusive(self.0.clone())
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, P: Pattern<'a, Searcher: ReverseSearcher<'a>>> DoubleEndedIterator
+ for SplitInclusive<'a, P>
+{
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ self.0.next_back_inclusive()
+ }
+}
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+impl<'a, P: Pattern<'a>> FusedIterator for SplitInclusive<'a, P> {}
+
+impl<'a, P: Pattern<'a>> SplitInclusive<'a, P> {
+ /// Returns remainder of the splitted string
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_inclusive_as_str)]
+ /// #![feature(split_inclusive)]
+ /// let mut split = "Mary had a little lamb".split_inclusive(' ');
+ /// assert_eq!(split.as_str(), "Mary had a little lamb");
+ /// split.next();
+ /// assert_eq!(split.as_str(), "had a little lamb");
+ /// split.by_ref().for_each(drop);
+ /// assert_eq!(split.as_str(), "");
+ /// ```
+ #[inline]
+ #[unstable(feature = "str_split_inclusive_as_str", issue = "77998")]
+ pub fn as_str(&self) -> &'a str {
+ self.0.as_str()
+ }
+}
+
/// An iterator of [`u16`] over the string encoded as UTF-16.
///
/// This struct is created by the [`encode_utf16`] method on [`str`].
/// See its documentation for more.
///
/// [`encode_utf16`]: str::encode_utf16
#[derive(Clone)]
#[stable(feature = "encode_utf16", since = "1.8.0")]
pub struct EncodeUtf16<'a> {
    /// The characters still to be encoded.
    pub(super) chars: Chars<'a>,
    /// The trailing surrogate of the last decoded surrogate pair,
    /// or `0` if no code unit is pending.
    pub(super) extra: u16,
}

#[stable(feature = "collection_debug", since = "1.17.0")]
impl fmt::Debug for EncodeUtf16<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately opaque: the internal state is not useful to show.
        f.pad("EncodeUtf16 { .. }")
    }
}
+
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+impl<'a> Iterator for EncodeUtf16<'a> {
+ type Item = u16;
+
+ #[inline]
+ fn next(&mut self) -> Option<u16> {
+ if self.extra != 0 {
+ let tmp = self.extra;
+ self.extra = 0;
+ return Some(tmp);
+ }
+
+ let mut buf = [0; 2];
+ self.chars.next().map(|ch| {
+ let n = ch.encode_utf16(&mut buf).len();
+ if n == 2 {
+ self.extra = buf[1];
+ }
+ buf[0]
+ })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (low, high) = self.chars.size_hint();
+ // every char gets either one u16 or two u16,
+ // so this iterator is between 1 or 2 times as
+ // long as the underlying iterator.
+ (low, high.and_then(|n| n.checked_mul(2)))
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for EncodeUtf16<'_> {}
+
/// The return type of [`str::escape_debug`].
#[stable(feature = "str_escape", since = "1.34.0")]
#[derive(Clone, Debug)]
pub struct EscapeDebug<'a> {
    // The leading `Chain` half holds the (possibly different) escape of the
    // first char; the rest of the string is escaped via `FlatMap`.
    pub(super) inner: Chain<
        Flatten<option::IntoIter<char::EscapeDebug>>,
        FlatMap<Chars<'a>, char::EscapeDebug, CharEscapeDebugContinue>,
    >,
}

/// The return type of [`str::escape_default`].
#[stable(feature = "str_escape", since = "1.34.0")]
#[derive(Clone, Debug)]
pub struct EscapeDefault<'a> {
    pub(super) inner: FlatMap<Chars<'a>, char::EscapeDefault, CharEscapeDefault>,
}

/// The return type of [`str::escape_unicode`].
#[stable(feature = "str_escape", since = "1.34.0")]
#[derive(Clone, Debug)]
pub struct EscapeUnicode<'a> {
    pub(super) inner: FlatMap<Chars<'a>, char::EscapeUnicode, CharEscapeUnicode>,
}

// Generates the `Display`, `Iterator` and `FusedIterator` impls for each of
// the escape iterator types above; all of them simply delegate to `inner`.
macro_rules! escape_types_impls {
    ($( $Name: ident ),+) => {$(
        #[stable(feature = "str_escape", since = "1.34.0")]
        impl<'a> fmt::Display for $Name<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render by draining a clone so `self` stays usable.
                self.clone().try_for_each(|c| f.write_char(c))
            }
        }

        #[stable(feature = "str_escape", since = "1.34.0")]
        impl<'a> Iterator for $Name<'a> {
            type Item = char;

            #[inline]
            fn next(&mut self) -> Option<char> { self.inner.next() }

            #[inline]
            fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }

            // Forward `try_fold`/`fold` so the inner iterator's specialized
            // (faster) implementations are used.
            #[inline]
            fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where
                Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Ok=Acc>
            {
                self.inner.try_fold(init, fold)
            }

            #[inline]
            fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
                where Fold: FnMut(Acc, Self::Item) -> Acc,
            {
                self.inner.fold(init, fold)
            }
        }

        #[stable(feature = "str_escape", since = "1.34.0")]
        impl<'a> FusedIterator for $Name<'a> {}
    )+}
}

escape_types_impls!(EscapeDebug, EscapeDefault, EscapeUnicode);
--- /dev/null
+use crate::char;
+use crate::fmt::{self, Write};
+use crate::mem;
+
+use super::from_utf8_unchecked;
+use super::validations::utf8_char_width;
+
/// Lossy UTF-8 string.
#[unstable(feature = "str_internals", issue = "none")]
pub struct Utf8Lossy {
    // Arbitrary bytes; not required to be valid UTF-8.
    bytes: [u8],
}

impl Utf8Lossy {
    /// Wraps an already-valid `&str`.
    pub fn from_str(s: &str) -> &Utf8Lossy {
        Utf8Lossy::from_bytes(s.as_bytes())
    }

    /// Wraps arbitrary bytes for lossy decoding.
    pub fn from_bytes(bytes: &[u8]) -> &Utf8Lossy {
        // SAFETY: Both use the same memory layout, and UTF-8 correctness isn't required.
        unsafe { mem::transmute(bytes) }
    }

    /// Iterates over maximal runs of valid UTF-8, each paired with the broken
    /// byte sequence (if any) that follows it.
    pub fn chunks(&self) -> Utf8LossyChunksIter<'_> {
        Utf8LossyChunksIter { source: &self.bytes }
    }
}

/// Iterator over lossy UTF-8 string
#[unstable(feature = "str_internals", issue = "none")]
#[allow(missing_debug_implementations)]
pub struct Utf8LossyChunksIter<'a> {
    // Bytes not yet decoded into chunks.
    source: &'a [u8],
}

#[unstable(feature = "str_internals", issue = "none")]
#[derive(PartialEq, Eq, Debug)]
pub struct Utf8LossyChunk<'a> {
    /// Sequence of valid chars.
    /// Can be empty between broken UTF-8 chars.
    pub valid: &'a str,
    /// Single broken char, empty if none.
    /// Empty iff iterator item is last.
    pub broken: &'a [u8],
}
+
impl<'a> Iterator for Utf8LossyChunksIter<'a> {
    type Item = Utf8LossyChunk<'a>;

    /// Validates bytes until the first invalid sequence (or the end) and
    /// yields the valid prefix together with the offending bytes.
    fn next(&mut self) -> Option<Utf8LossyChunk<'a>> {
        if self.source.is_empty() {
            return None;
        }

        // Tag of a UTF-8 continuation byte: 0b10xx_xxxx.
        const TAG_CONT_U8: u8 = 128;
        // Out-of-bounds reads yield 0, which fails every continuation check
        // below, so a sequence truncated at the end of input reports as broken.
        fn safe_get(xs: &[u8], i: usize) -> u8 {
            *xs.get(i).unwrap_or(&0)
        }

        let mut i = 0;
        while i < self.source.len() {
            // `i_` marks the start of the current (possibly multi-byte) char.
            let i_ = i;

            // SAFETY: `i` starts at `0`, is less than `self.source.len()`, and
            // only increases, so `0 <= i < self.source.len()`.
            let byte = unsafe { *self.source.get_unchecked(i) };
            i += 1;

            if byte < 128 {
                // ASCII byte: always valid on its own; keep scanning.
            } else {
                // Expected total length of the sequence led by `byte`.
                let w = utf8_char_width(byte);

                // Emit everything validated so far (`0..i_`) as `valid` and
                // the offending bytes (`i_..i`) as `broken`, consuming them.
                macro_rules! error {
                    () => {{
                        // SAFETY: We have checked up to `i` that source is valid UTF-8.
                        unsafe {
                            let r = Utf8LossyChunk {
                                valid: from_utf8_unchecked(&self.source[0..i_]),
                                broken: &self.source[i_..i],
                            };
                            self.source = &self.source[i..];
                            return Some(r);
                        }
                    }};
                }

                match w {
                    2 => {
                        // `x & 192 != TAG_CONT_U8` rejects any byte whose top
                        // two bits are not `10` (i.e. not a continuation byte).
                        if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
                            error!();
                        }
                        i += 1;
                    }
                    3 => {
                        // The second-byte ranges reject overlong encodings
                        // (0xE0) and UTF-16 surrogates (0xED 0xA0..=0xBF).
                        match (byte, safe_get(self.source, i)) {
                            (0xE0, 0xA0..=0xBF) => (),
                            (0xE1..=0xEC, 0x80..=0xBF) => (),
                            (0xED, 0x80..=0x9F) => (),
                            (0xEE..=0xEF, 0x80..=0xBF) => (),
                            _ => {
                                error!();
                            }
                        }
                        i += 1;
                        if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
                            error!();
                        }
                        i += 1;
                    }
                    4 => {
                        // The second-byte ranges reject overlong encodings
                        // (0xF0) and code points above U+10FFFF (0xF4 0x90..).
                        match (byte, safe_get(self.source, i)) {
                            (0xF0, 0x90..=0xBF) => (),
                            (0xF1..=0xF3, 0x80..=0xBF) => (),
                            (0xF4, 0x80..=0x8F) => (),
                            _ => {
                                error!();
                            }
                        }
                        i += 1;
                        if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
                            error!();
                        }
                        i += 1;
                        if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
                            error!();
                        }
                        i += 1;
                    }
                    _ => {
                        // Invalid leading byte (width 0 or 1 for a non-ASCII byte).
                        error!();
                    }
                }
            }
        }

        // Reached the end without errors: the whole remainder is one valid
        // chunk with no broken tail.
        let r = Utf8LossyChunk {
            // SAFETY: We have checked that the entire source is valid UTF-8.
            valid: unsafe { from_utf8_unchecked(self.source) },
            broken: &[],
        };
        self.source = &[];
        Some(r)
    }
}
+
impl fmt::Display for Utf8Lossy {
    /// Writes the string, substituting U+FFFD for each broken sequence.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // If we're the empty string then our iterator won't actually yield
        // anything, so perform the formatting manually
        if self.bytes.is_empty() {
            return "".fmt(f);
        }

        for Utf8LossyChunk { valid, broken } in self.chunks() {
            // If we successfully decoded the whole chunk as a valid string then
            // we can return a direct formatting of the string which will also
            // respect various formatting flags if possible.
            if valid.len() == self.bytes.len() {
                assert!(broken.is_empty());
                return valid.fmt(f);
            }

            f.write_str(valid)?;
            if !broken.is_empty() {
                f.write_char(char::REPLACEMENT_CHARACTER)?;
            }
        }
        Ok(())
    }
}

impl fmt::Debug for Utf8Lossy {
    /// Writes the string in quoted, escaped form; broken bytes appear as
    /// `\x..` hex escapes.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_char('"')?;

        for Utf8LossyChunk { valid, broken } in self.chunks() {
            // Valid part.
            // Here we partially parse UTF-8 again which is suboptimal.
            {
                // `from` tracks the start of the run of chars that need no
                // escaping, so unescaped spans are written in one call.
                let mut from = 0;
                for (i, c) in valid.char_indices() {
                    let esc = c.escape_debug();
                    // If char needs escaping, flush backlog so far and write, else skip
                    if esc.len() != 1 {
                        f.write_str(&valid[from..i])?;
                        for c in esc {
                            f.write_char(c)?;
                        }
                        from = i + c.len_utf8();
                    }
                }
                f.write_str(&valid[from..])?;
            }

            // Broken parts of string as hex escape.
            for &b in broken {
                write!(f, "\\x{:02x}", b)?;
            }
        }

        f.write_char('"')
    }
}
--- /dev/null
+//! String manipulation.
+//!
+//! For more details, see the [`std::str`] module.
+//!
+//! [`std::str`]: ../../std/str/index.html
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+mod converts;
+mod error;
+mod iter;
+mod traits;
+mod validations;
+
+use self::pattern::Pattern;
+use self::pattern::{DoubleEndedSearcher, ReverseSearcher, Searcher};
+
+use crate::char;
+use crate::mem;
+use crate::slice::{self, SliceIndex};
+
+pub mod pattern;
+
+#[unstable(feature = "str_internals", issue = "none")]
+#[allow(missing_docs)]
+pub mod lossy;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use converts::{from_utf8, from_utf8_unchecked};
+
+#[stable(feature = "str_mut_extras", since = "1.20.0")]
+pub use converts::{from_utf8_mut, from_utf8_unchecked_mut};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use error::{ParseBoolError, Utf8Error};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use traits::FromStr;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{Bytes, CharIndices, Chars, Lines, SplitWhitespace};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
+pub use iter::LinesAny;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplit, RSplitTerminator, Split, SplitTerminator};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use iter::{RSplitN, SplitN};
+
+#[stable(feature = "str_matches", since = "1.2.0")]
+pub use iter::{Matches, RMatches};
+
+#[stable(feature = "str_match_indices", since = "1.5.0")]
+pub use iter::{MatchIndices, RMatchIndices};
+
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub use iter::EncodeUtf16;
+
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use iter::{EscapeDebug, EscapeDefault, EscapeUnicode};
+
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+pub use iter::SplitAsciiWhitespace;
+
+#[unstable(feature = "split_inclusive", issue = "72360")]
+use iter::SplitInclusive;
+
+#[unstable(feature = "str_internals", issue = "none")]
+pub use validations::next_code_point;
+
+use iter::MatchIndicesInternal;
+use iter::SplitInternal;
+use iter::{MatchesInternal, SplitNInternal};
+
+use validations::truncate_to_char_boundary;
+
/// Shared panic routine for invalid `str` slicing.
///
/// Diagnoses exactly why `&s[begin..end]` is invalid and panics with a
/// message tailored to the failure: an out-of-bounds index, an inverted
/// range (`begin > end`), or an index that falls inside a multi-byte
/// UTF-8 sequence. Never returns.
///
/// `#[inline(never)]`/`#[cold]` keep this error path out of the hot
/// slicing code; `#[track_caller]` attributes the panic to the user's
/// slicing expression rather than to this helper.
#[inline(never)]
#[cold]
#[track_caller]
fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
    // Cap how much of `s` is echoed into the panic message, trimming on a
    // char boundary so the quoted prefix is still valid UTF-8.
    const MAX_DISPLAY_LENGTH: usize = 256;
    let (truncated, s_trunc) = truncate_to_char_boundary(s, MAX_DISPLAY_LENGTH);
    let ellipsis = if truncated { "[...]" } else { "" };

    // 1. out of bounds
    if begin > s.len() || end > s.len() {
        // Report whichever index is past the end, preferring `begin`.
        let oob_index = if begin > s.len() { begin } else { end };
        panic!("byte index {} is out of bounds of `{}`{}", oob_index, s_trunc, ellipsis);
    }

    // 2. begin <= end
    assert!(
        begin <= end,
        "begin <= end ({} <= {}) when slicing `{}`{}",
        begin,
        end,
        s_trunc,
        ellipsis
    );

    // 3. character boundary: both indices are in bounds and ordered, so at
    // least one of them must sit inside a multi-byte sequence.
    let index = if !s.is_char_boundary(begin) { begin } else { end };
    // find the character: walk backwards to the start of the code point
    // containing `index`.
    let mut char_start = index;
    while !s.is_char_boundary(char_start) {
        char_start -= 1;
    }
    // `char_start` must be less than len and a char boundary
    let ch = s[char_start..].chars().next().unwrap();
    let char_range = char_start..char_start + ch.len_utf8();
    panic!(
        "byte index {} is not a char boundary; it is inside {:?} (bytes {:?}) of `{}`{}",
        index, ch, char_range, s_trunc, ellipsis
    );
}
+
+#[lang = "str"]
+#[cfg(not(test))]
+impl str {
+ /// Returns the length of `self`.
+ ///
+ /// This length is in bytes, not [`char`]s or graphemes. In other words,
+ /// it may not be what a human considers the length of the string.
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let len = "foo".len();
+ /// assert_eq!(3, len);
+ ///
+ /// assert_eq!("ƒoo".len(), 4); // fancy f!
+ /// assert_eq!("ƒoo".chars().count(), 3);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_str_len", since = "1.32.0")]
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.as_bytes().len()
+ }
+
+ /// Returns `true` if `self` has a length of zero bytes.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "";
+ /// assert!(s.is_empty());
+ ///
+ /// let s = "not empty";
+ /// assert!(!s.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_str_is_empty", since = "1.32.0")]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Checks that `index`-th byte is the first byte in a UTF-8 code point
+ /// sequence or the end of the string.
+ ///
+ /// The start and end of the string (when `index == self.len()`) are
+ /// considered to be boundaries.
+ ///
+ /// Returns `false` if `index` is greater than `self.len()`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// assert!(s.is_char_boundary(0));
+ /// // start of `老`
+ /// assert!(s.is_char_boundary(6));
+ /// assert!(s.is_char_boundary(s.len()));
+ ///
+ /// // second byte of `ö`
+ /// assert!(!s.is_char_boundary(2));
+ ///
+ /// // third byte of `老`
+ /// assert!(!s.is_char_boundary(8));
+ /// ```
+ #[stable(feature = "is_char_boundary", since = "1.9.0")]
+ #[inline]
+ pub fn is_char_boundary(&self, index: usize) -> bool {
+ // 0 and len are always ok.
+ // Test for 0 explicitly so that it can optimize out the check
+ // easily and skip reading string data for that case.
+ if index == 0 || index == self.len() {
+ return true;
+ }
+ match self.as_bytes().get(index) {
+ None => false,
+ // This is bit magic equivalent to: b < 128 || b >= 192
+ Some(&b) => (b as i8) >= -0x40,
+ }
+ }
+
    /// Converts a string slice to a byte slice. To convert the byte slice back
    /// into a string slice, use the [`from_utf8`] function.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let bytes = "bors".as_bytes();
    /// assert_eq!(b"bors", bytes);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "str_as_bytes", since = "1.32.0")]
    #[inline(always)]
    #[allow(unused_attributes)]
    // The transmute below relies on the unstable `const_fn_transmute`
    // feature; these attributes grant this stable const fn access to it
    // under both the bootstrap and the current compiler.
    #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_transmute))]
    #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_transmute))]
    pub const fn as_bytes(&self) -> &[u8] {
        // SAFETY: const sound because we transmute two types with the same layout
        // (`&str` and `&[u8]` share one representation; going in this
        // direction cannot produce an invalid value).
        unsafe { mem::transmute(self) }
    }
+
+ /// Converts a mutable string slice to a mutable byte slice.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the content of the slice is valid UTF-8
+ /// before the borrow ends and the underlying `str` is used.
+ ///
+ /// Use of a `str` whose contents are not valid UTF-8 is undefined behavior.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("Hello");
+ /// let bytes = unsafe { s.as_bytes_mut() };
+ ///
+ /// assert_eq!(b"Hello", bytes);
+ /// ```
+ ///
+ /// Mutability:
+ ///
+ /// ```
+ /// let mut s = String::from("🗻∈🌏");
+ ///
+ /// unsafe {
+ /// let bytes = s.as_bytes_mut();
+ ///
+ /// bytes[0] = 0xF0;
+ /// bytes[1] = 0x9F;
+ /// bytes[2] = 0x8D;
+ /// bytes[3] = 0x94;
+ /// }
+ ///
+ /// assert_eq!("🍔∈🌏", s);
+ /// ```
+ #[stable(feature = "str_mut_extras", since = "1.20.0")]
+ #[inline(always)]
+ pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
+ // SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
+ // has the same layout as `&[u8]` (only libstd can make this guarantee).
+ // The pointer dereference is safe since it comes from a mutable reference which
+ // is guaranteed to be valid for writes.
+ unsafe { &mut *(self as *mut str as *mut [u8]) }
+ }
+
+ /// Converts a string slice to a raw pointer.
+ ///
+ /// As string slices are a slice of bytes, the raw pointer points to a
+ /// [`u8`]. This pointer will be pointing to the first byte of the string
+ /// slice.
+ ///
+ /// The caller must ensure that the returned pointer is never written to.
+ /// If you need to mutate the contents of the string slice, use [`as_mut_ptr`].
+ ///
+ /// [`as_mut_ptr`]: str::as_mut_ptr
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "Hello";
+ /// let ptr = s.as_ptr();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "rustc_str_as_ptr", since = "1.32.0")]
+ #[inline]
+ pub const fn as_ptr(&self) -> *const u8 {
+ self as *const str as *const u8
+ }
+
+ /// Converts a mutable string slice to a raw pointer.
+ ///
+ /// As string slices are a slice of bytes, the raw pointer points to a
+ /// [`u8`]. This pointer will be pointing to the first byte of the string
+ /// slice.
+ ///
+ /// It is your responsibility to make sure that the string slice only gets
+ /// modified in a way that it remains valid UTF-8.
+ #[stable(feature = "str_as_mut_ptr", since = "1.36.0")]
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut u8 {
+ self as *mut str as *mut u8
+ }
+
+ /// Returns a subslice of `str`.
+ ///
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+ /// [`None`] whenever equivalent indexing operation would panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = String::from("🗻∈🌏");
+ ///
+ /// assert_eq!(Some("🗻"), v.get(0..4));
+ ///
+ /// // indices not on UTF-8 sequence boundaries
+ /// assert!(v.get(1..).is_none());
+ /// assert!(v.get(..8).is_none());
+ ///
+ /// // out of bounds
+ /// assert!(v.get(..42).is_none());
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[inline]
+ pub fn get<I: SliceIndex<str>>(&self, i: I) -> Option<&I::Output> {
+ i.get(self)
+ }
+
+ /// Returns a mutable subslice of `str`.
+ ///
+ /// This is the non-panicking alternative to indexing the `str`. Returns
+ /// [`None`] whenever equivalent indexing operation would panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = String::from("hello");
+ /// // correct length
+ /// assert!(v.get_mut(0..5).is_some());
+ /// // out of bounds
+ /// assert!(v.get_mut(..42).is_none());
+ /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v));
+ ///
+ /// assert_eq!("hello", v);
+ /// {
+ /// let s = v.get_mut(0..2);
+ /// let s = s.map(|s| {
+ /// s.make_ascii_uppercase();
+ /// &*s
+ /// });
+ /// assert_eq!(Some("HE"), s);
+ /// }
+ /// assert_eq!("HEllo", v);
+ /// ```
+ #[stable(feature = "str_checked_slicing", since = "1.20.0")]
+ #[inline]
+ pub fn get_mut<I: SliceIndex<str>>(&mut self, i: I) -> Option<&mut I::Output> {
+ i.get_mut(self)
+ }
+
    /// Returns an unchecked subslice of `str`.
    ///
    /// This is the unchecked alternative to indexing the `str`.
    ///
    /// # Safety
    ///
    /// Callers of this function are responsible that these preconditions are
    /// satisfied:
    ///
    /// * The starting index must not exceed the ending index;
    /// * Indexes must be within bounds of the original slice;
    /// * Indexes must lie on UTF-8 sequence boundaries.
    ///
    /// Failing that, the returned string slice may reference invalid memory or
    /// violate the invariants communicated by the `str` type.
    ///
    /// # Examples
    ///
    /// ```
    /// let v = "🗻∈🌏";
    /// unsafe {
    ///     assert_eq!("🗻", v.get_unchecked(0..4));
    ///     assert_eq!("∈", v.get_unchecked(4..7));
    ///     assert_eq!("🌏", v.get_unchecked(7..11));
    /// }
    /// ```
    #[stable(feature = "str_checked_slicing", since = "1.20.0")]
    #[inline]
    pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &*i.get_unchecked(self) }
    }
+
    /// Returns a mutable, unchecked subslice of `str`.
    ///
    /// This is the unchecked alternative to indexing the `str`.
    ///
    /// # Safety
    ///
    /// Callers of this function are responsible that these preconditions are
    /// satisfied:
    ///
    /// * The starting index must not exceed the ending index;
    /// * Indexes must be within bounds of the original slice;
    /// * Indexes must lie on UTF-8 sequence boundaries.
    ///
    /// Failing that, the returned string slice may reference invalid memory or
    /// violate the invariants communicated by the `str` type.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut v = String::from("🗻∈🌏");
    /// unsafe {
    ///     assert_eq!("🗻", v.get_unchecked_mut(0..4));
    ///     assert_eq!("∈", v.get_unchecked_mut(4..7));
    ///     assert_eq!("🌏", v.get_unchecked_mut(7..11));
    /// }
    /// ```
    #[stable(feature = "str_checked_slicing", since = "1.20.0")]
    #[inline]
    pub unsafe fn get_unchecked_mut<I: SliceIndex<str>>(&mut self, i: I) -> &mut I::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &mut *i.get_unchecked_mut(self) }
    }
+
    /// Creates a string slice from another string slice, bypassing safety
    /// checks.
    ///
    /// This is generally not recommended, use with caution! For a safe
    /// alternative see [`str`] and [`Index`].
    ///
    /// [`Index`]: crate::ops::Index
    ///
    /// This new slice goes from `begin` to `end`, including `begin` but
    /// excluding `end`.
    ///
    /// To get a mutable string slice instead, see the
    /// [`slice_mut_unchecked`] method.
    ///
    /// [`slice_mut_unchecked`]: str::slice_mut_unchecked
    ///
    /// # Safety
    ///
    /// Callers of this function are responsible that three preconditions are
    /// satisfied:
    ///
    /// * `begin` must not exceed `end`.
    /// * `begin` and `end` must be byte positions within the string slice.
    /// * `begin` and `end` must lie on UTF-8 sequence boundaries.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "Löwe 老虎 Léopard";
    ///
    /// unsafe {
    ///     assert_eq!("Löwe 老虎 Léopard", s.slice_unchecked(0, 21));
    /// }
    ///
    /// let s = "Hello, world!";
    ///
    /// unsafe {
    ///     assert_eq!("world", s.slice_unchecked(7, 12));
    /// }
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked(begin..end)` instead")]
    #[inline]
    pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
        // Deprecated shim: forwards to the `Range` impl of `SliceIndex`,
        // exactly like `get_unchecked(begin..end)`.
        // SAFETY: the caller must uphold the safety contract for `get_unchecked`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &*(begin..end).get_unchecked(self) }
    }
+
    /// Creates a string slice from another string slice, bypassing safety
    /// checks.
    /// This is generally not recommended, use with caution! For a safe
    /// alternative see [`str`] and [`IndexMut`].
    ///
    /// [`IndexMut`]: crate::ops::IndexMut
    ///
    /// This new slice goes from `begin` to `end`, including `begin` but
    /// excluding `end`.
    ///
    /// To get an immutable string slice instead, see the
    /// [`slice_unchecked`] method.
    ///
    /// [`slice_unchecked`]: str::slice_unchecked
    ///
    /// # Safety
    ///
    /// Callers of this function are responsible that three preconditions are
    /// satisfied:
    ///
    /// * `begin` must not exceed `end`.
    /// * `begin` and `end` must be byte positions within the string slice.
    /// * `begin` and `end` must lie on UTF-8 sequence boundaries.
    #[stable(feature = "str_slice_mut", since = "1.5.0")]
    #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked_mut(begin..end)` instead")]
    #[inline]
    pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
        // Deprecated shim: forwards to the `Range` impl of `SliceIndex`,
        // exactly like `get_unchecked_mut(begin..end)`.
        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`;
        // the slice is dereferenceable because `self` is a safe reference.
        // The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
        unsafe { &mut *(begin..end).get_unchecked_mut(self) }
    }
+
+ /// Divide one string slice into two at an index.
+ ///
+ /// The argument, `mid`, should be a byte offset from the start of the
+ /// string. It must also be on the boundary of a UTF-8 code point.
+ ///
+ /// The two slices returned go from the start of the string slice to `mid`,
+ /// and from `mid` to the end of the string slice.
+ ///
+ /// To get mutable string slices instead, see the [`split_at_mut`]
+ /// method.
+ ///
+ /// [`split_at_mut`]: str::split_at_mut
+ ///
+ /// # Panics
+ ///
+ /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is
+ /// past the end of the last code point of the string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "Per Martin-Löf";
+ ///
+ /// let (first, last) = s.split_at(3);
+ ///
+ /// assert_eq!("Per", first);
+ /// assert_eq!(" Martin-Löf", last);
+ /// ```
+ #[inline]
+ #[stable(feature = "str_split_at", since = "1.4.0")]
+ pub fn split_at(&self, mid: usize) -> (&str, &str) {
+ // is_char_boundary checks that the index is in [0, .len()]
+ if self.is_char_boundary(mid) {
+ // SAFETY: just checked that `mid` is on a char boundary.
+ unsafe { (self.get_unchecked(0..mid), self.get_unchecked(mid..self.len())) }
+ } else {
+ slice_error_fail(self, 0, mid)
+ }
+ }
+
    /// Divide one mutable string slice into two at an index.
    ///
    /// The argument, `mid`, should be a byte offset from the start of the
    /// string. It must also be on the boundary of a UTF-8 code point.
    ///
    /// The two slices returned go from the start of the string slice to `mid`,
    /// and from `mid` to the end of the string slice.
    ///
    /// To get immutable string slices instead, see the [`split_at`] method.
    ///
    /// [`split_at`]: str::split_at
    ///
    /// # Panics
    ///
    /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is
    /// past the end of the last code point of the string slice.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let mut s = "Per Martin-Löf".to_string();
    /// {
    ///     let (first, last) = s.split_at_mut(3);
    ///     first.make_ascii_uppercase();
    ///     assert_eq!("PER", first);
    ///     assert_eq!(" Martin-Löf", last);
    /// }
    /// assert_eq!("PER Martin-Löf", s);
    /// ```
    #[inline]
    #[stable(feature = "str_split_at", since = "1.4.0")]
    pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) {
        // is_char_boundary checks that the index is in [0, .len()]
        if self.is_char_boundary(mid) {
            let len = self.len();
            let ptr = self.as_mut_ptr();
            // SAFETY: just checked that `mid` is on a char boundary.
            // The halves are rebuilt from raw parts (rather than by slicing
            // `self` twice) so that both can be handed out as `&mut` at
            // once: the ranges `[0, mid)` and `[mid, len)` never overlap.
            unsafe {
                (
                    from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)),
                    from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr.add(mid), len - mid)),
                )
            }
        } else {
            slice_error_fail(self, 0, mid)
        }
    }
+
+ /// Returns an iterator over the [`char`]s of a string slice.
+ ///
+ /// As a string slice consists of valid UTF-8, we can iterate through a
+ /// string slice by [`char`]. This method returns such an iterator.
+ ///
+ /// It's important to remember that [`char`] represents a Unicode Scalar
+ /// Value, and may not match your idea of what a 'character' is. Iteration
+ /// over grapheme clusters may be what you actually want. This functionality
+ /// is not provided by Rust's standard library, check crates.io instead.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let word = "goodbye";
+ ///
+ /// let count = word.chars().count();
+ /// assert_eq!(7, count);
+ ///
+ /// let mut chars = word.chars();
+ ///
+ /// assert_eq!(Some('g'), chars.next());
+ /// assert_eq!(Some('o'), chars.next());
+ /// assert_eq!(Some('o'), chars.next());
+ /// assert_eq!(Some('d'), chars.next());
+ /// assert_eq!(Some('b'), chars.next());
+ /// assert_eq!(Some('y'), chars.next());
+ /// assert_eq!(Some('e'), chars.next());
+ ///
+ /// assert_eq!(None, chars.next());
+ /// ```
+ ///
+ /// Remember, [`char`]s may not match your intuition about characters:
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// ```
+ /// let y = "y̆";
+ ///
+ /// let mut chars = y.chars();
+ ///
+ /// assert_eq!(Some('y'), chars.next()); // not 'y̆'
+ /// assert_eq!(Some('\u{0306}'), chars.next());
+ ///
+ /// assert_eq!(None, chars.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn chars(&self) -> Chars<'_> {
+ Chars { iter: self.as_bytes().iter() }
+ }
+
+ /// Returns an iterator over the [`char`]s of a string slice, and their
+ /// positions.
+ ///
+ /// As a string slice consists of valid UTF-8, we can iterate through a
+ /// string slice by [`char`]. This method returns an iterator of both
+ /// these [`char`]s, as well as their byte positions.
+ ///
+ /// The iterator yields tuples. The position is first, the [`char`] is
+ /// second.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let word = "goodbye";
+ ///
+ /// let count = word.char_indices().count();
+ /// assert_eq!(7, count);
+ ///
+ /// let mut char_indices = word.char_indices();
+ ///
+ /// assert_eq!(Some((0, 'g')), char_indices.next());
+ /// assert_eq!(Some((1, 'o')), char_indices.next());
+ /// assert_eq!(Some((2, 'o')), char_indices.next());
+ /// assert_eq!(Some((3, 'd')), char_indices.next());
+ /// assert_eq!(Some((4, 'b')), char_indices.next());
+ /// assert_eq!(Some((5, 'y')), char_indices.next());
+ /// assert_eq!(Some((6, 'e')), char_indices.next());
+ ///
+ /// assert_eq!(None, char_indices.next());
+ /// ```
+ ///
+ /// Remember, [`char`]s may not match your intuition about characters:
+ ///
+ /// [`char`]: prim@char
+ ///
+ /// ```
+ /// let yes = "y̆es";
+ ///
+ /// let mut char_indices = yes.char_indices();
+ ///
+ /// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆')
+ /// assert_eq!(Some((1, '\u{0306}')), char_indices.next());
+ ///
+ /// // note the 3 here - the last character took up two bytes
+ /// assert_eq!(Some((3, 'e')), char_indices.next());
+ /// assert_eq!(Some((4, 's')), char_indices.next());
+ ///
+ /// assert_eq!(None, char_indices.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn char_indices(&self) -> CharIndices<'_> {
+ CharIndices { front_offset: 0, iter: self.chars() }
+ }
+
+ /// An iterator over the bytes of a string slice.
+ ///
+ /// As a string slice consists of a sequence of bytes, we can iterate
+ /// through a string slice by byte. This method returns such an iterator.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut bytes = "bors".bytes();
+ ///
+ /// assert_eq!(Some(b'b'), bytes.next());
+ /// assert_eq!(Some(b'o'), bytes.next());
+ /// assert_eq!(Some(b'r'), bytes.next());
+ /// assert_eq!(Some(b's'), bytes.next());
+ ///
+ /// assert_eq!(None, bytes.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn bytes(&self) -> Bytes<'_> {
+ Bytes(self.as_bytes().iter().copied())
+ }
+
+ /// Splits a string slice by whitespace.
+ ///
+ /// The iterator returned will return string slices that are sub-slices of
+ /// the original string slice, separated by any amount of whitespace.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`. If you only want to split on ASCII whitespace
+ /// instead, use [`split_ascii_whitespace`].
+ ///
+ /// [`split_ascii_whitespace`]: str::split_ascii_whitespace
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut iter = "A few words".split_whitespace();
+ ///
+ /// assert_eq!(Some("A"), iter.next());
+ /// assert_eq!(Some("few"), iter.next());
+ /// assert_eq!(Some("words"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ ///
+ /// All kinds of whitespace are considered:
+ ///
+ /// ```
+ /// let mut iter = " Mary had\ta\u{2009}little \n\t lamb".split_whitespace();
+ /// assert_eq!(Some("Mary"), iter.next());
+ /// assert_eq!(Some("had"), iter.next());
+ /// assert_eq!(Some("a"), iter.next());
+ /// assert_eq!(Some("little"), iter.next());
+ /// assert_eq!(Some("lamb"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[stable(feature = "split_whitespace", since = "1.1.0")]
+ #[inline]
+ pub fn split_whitespace(&self) -> SplitWhitespace<'_> {
+ SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) }
+ }
+
+ /// Splits a string slice by ASCII whitespace.
+ ///
+ /// The iterator returned will return string slices that are sub-slices of
+ /// the original string slice, separated by any amount of ASCII whitespace.
+ ///
+ /// To split by Unicode `Whitespace` instead, use [`split_whitespace`].
+ ///
+ /// [`split_whitespace`]: str::split_whitespace
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut iter = "A few words".split_ascii_whitespace();
+ ///
+ /// assert_eq!(Some("A"), iter.next());
+ /// assert_eq!(Some("few"), iter.next());
+ /// assert_eq!(Some("words"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ ///
+ /// All kinds of ASCII whitespace are considered:
+ ///
+ /// ```
+ /// let mut iter = " Mary had\ta little \n\t lamb".split_ascii_whitespace();
+ /// assert_eq!(Some("Mary"), iter.next());
+ /// assert_eq!(Some("had"), iter.next());
+ /// assert_eq!(Some("a"), iter.next());
+ /// assert_eq!(Some("little"), iter.next());
+ /// assert_eq!(Some("lamb"), iter.next());
+ ///
+ /// assert_eq!(None, iter.next());
+ /// ```
+ #[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+ #[inline]
+ pub fn split_ascii_whitespace(&self) -> SplitAsciiWhitespace<'_> {
+ let inner =
+ self.as_bytes().split(IsAsciiWhitespace).filter(BytesIsNotEmpty).map(UnsafeBytesToStr);
+ SplitAsciiWhitespace { inner }
+ }
+
+ /// An iterator over the lines of a string, as string slices.
+ ///
+ /// Lines are ended with either a newline (`\n`) or a carriage return with
+ /// a line feed (`\r\n`).
+ ///
+ /// The final line ending is optional. A string that ends with a final line
+ /// ending will return the same lines as an otherwise identical string
+ /// without a final line ending.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let text = "foo\r\nbar\n\nbaz\n";
+ /// let mut lines = text.lines();
+ ///
+ /// assert_eq!(Some("foo"), lines.next());
+ /// assert_eq!(Some("bar"), lines.next());
+ /// assert_eq!(Some(""), lines.next());
+ /// assert_eq!(Some("baz"), lines.next());
+ ///
+ /// assert_eq!(None, lines.next());
+ /// ```
+ ///
+ /// The final line ending isn't required:
+ ///
+ /// ```
+ /// let text = "foo\nbar\n\r\nbaz";
+ /// let mut lines = text.lines();
+ ///
+ /// assert_eq!(Some("foo"), lines.next());
+ /// assert_eq!(Some("bar"), lines.next());
+ /// assert_eq!(Some(""), lines.next());
+ /// assert_eq!(Some("baz"), lines.next());
+ ///
+ /// assert_eq!(None, lines.next());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn lines(&self) -> Lines<'_> {
+ Lines(self.split_terminator('\n').map(LinesAnyMap))
+ }
+
+ /// An iterator over the lines of a string.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.4.0", reason = "use lines() instead now")]
+ #[inline]
+ #[allow(deprecated)]
+ pub fn lines_any(&self) -> LinesAny<'_> {
+ LinesAny(self.lines())
+ }
+
+ /// Returns an iterator of `u16` over the string encoded as UTF-16.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let text = "Zażółć gęślą jaźń";
+ ///
+ /// let utf8_len = text.len();
+ /// let utf16_len = text.encode_utf16().count();
+ ///
+ /// assert!(utf16_len <= utf8_len);
+ /// ```
+ #[stable(feature = "encode_utf16", since = "1.8.0")]
+ pub fn encode_utf16(&self) -> EncodeUtf16<'_> {
+ EncodeUtf16 { chars: self.chars(), extra: 0 }
+ }
+
+ /// Returns `true` if the given pattern matches a sub-slice of
+ /// this string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.contains("nana"));
+ /// assert!(!bananas.contains("apples"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool {
+ pat.is_contained_in(self)
+ }
+
+ /// Returns `true` if the given pattern matches a prefix of this
+ /// string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.starts_with("bana"));
+ /// assert!(!bananas.starts_with("nana"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool {
+ pat.is_prefix_of(self)
+ }
+
+ /// Returns `true` if the given pattern matches a suffix of this
+ /// string slice.
+ ///
+ /// Returns `false` if it does not.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let bananas = "bananas";
+ ///
+ /// assert!(bananas.ends_with("anas"));
+ /// assert!(!bananas.ends_with("nana"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn ends_with<'a, P>(&'a self, pat: P) -> bool
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ pat.is_suffix_of(self)
+ }
+
+ /// Returns the byte index of the first character of this string slice that
+ /// matches the pattern.
+ ///
+ /// Returns [`None`] if the pattern doesn't match.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard Gepardi";
+ ///
+ /// assert_eq!(s.find('L'), Some(0));
+ /// assert_eq!(s.find('é'), Some(14));
+ /// assert_eq!(s.find("pard"), Some(17));
+ /// ```
+ ///
+ /// More complex patterns using point-free style and closures:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ ///
+ /// assert_eq!(s.find(char::is_whitespace), Some(5));
+ /// assert_eq!(s.find(char::is_lowercase), Some(1));
+ /// assert_eq!(s.find(|c: char| c.is_whitespace() || c.is_lowercase()), Some(1));
+ /// assert_eq!(s.find(|c: char| (c < 'o') && (c > 'a')), Some(4));
+ /// ```
+ ///
+ /// Not finding the pattern:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// let x: &[_] = &['1', '2'];
+ ///
+ /// assert_eq!(s.find(x), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> {
+ pat.into_searcher(self).next_match().map(|(i, _)| i)
+ }
+
+ /// Returns the byte index for the first character of the rightmost match of the pattern in
+ /// this string slice.
+ ///
+ /// Returns [`None`] if the pattern doesn't match.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard Gepardi";
+ ///
+ /// assert_eq!(s.rfind('L'), Some(13));
+ /// assert_eq!(s.rfind('é'), Some(14));
+ /// assert_eq!(s.rfind("pard"), Some(24));
+ /// ```
+ ///
+ /// More complex patterns with closures:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ ///
+ /// assert_eq!(s.rfind(char::is_whitespace), Some(12));
+ /// assert_eq!(s.rfind(char::is_lowercase), Some(20));
+ /// ```
+ ///
+ /// Not finding the pattern:
+ ///
+ /// ```
+ /// let s = "Löwe 老虎 Léopard";
+ /// let x: &[_] = &['1', '2'];
+ ///
+ /// assert_eq!(s.rfind(x), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rfind<'a, P>(&'a self, pat: P) -> Option<usize>
+ where
+ P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+ pat.into_searcher(self).next_match_back().map(|(i, _)| i)
+ }
+
+ /// An iterator over substrings of this string slice, separated by
+ /// characters matched by a pattern.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rsplit`] method can be used.
+ ///
+ /// [`rsplit`]: str::rsplit
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect();
+ /// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]);
+ ///
+ /// let v: Vec<&str> = "".split('X').collect();
+ /// assert_eq!(v, [""]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect();
+ /// assert_eq!(v, ["lion", "", "tiger", "leopard"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".split("::").collect();
+ /// assert_eq!(v, ["lion", "tiger", "leopard"]);
+ ///
+ /// let v: Vec<&str> = "abc1def2ghi".split(char::is_numeric).collect();
+ /// assert_eq!(v, ["abc", "def", "ghi"]);
+ ///
+ /// let v: Vec<&str> = "lionXtigerXleopard".split(char::is_uppercase).collect();
+ /// assert_eq!(v, ["lion", "tiger", "leopard"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".split(|c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["abc", "def", "ghi"]);
+ /// ```
+ ///
+ /// If a string contains multiple contiguous separators, you will end up
+ /// with empty strings in the output:
+ ///
+ /// ```
+ /// let x = "||||a||b|c".to_string();
+ /// let d: Vec<_> = x.split('|').collect();
+ ///
+ /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]);
+ /// ```
+ ///
+ /// Contiguous separators are separated by the empty string.
+ ///
+ /// ```
+ /// let x = "(///)".to_string();
+ /// let d: Vec<_> = x.split('/').collect();
+ ///
+ /// assert_eq!(d, &["(", "", "", ")"]);
+ /// ```
+ ///
+ /// Separators at the start or end of a string are neighbored
+ /// by empty strings.
+ ///
+ /// ```
+ /// let d: Vec<_> = "010".split("0").collect();
+ /// assert_eq!(d, &["", "1", ""]);
+ /// ```
+ ///
+ /// When the empty string is used as a separator, it separates
+ /// every character in the string, along with the beginning
+ /// and end of the string.
+ ///
+ /// ```
+ /// let f: Vec<_> = "rust".split("").collect();
+ /// assert_eq!(f, &["", "r", "u", "s", "t", ""]);
+ /// ```
+ ///
+ /// Contiguous separators can lead to possibly surprising behavior
+ /// when whitespace is used as the separator. This code is correct:
+ ///
+ /// ```
+ /// let x = " a b c".to_string();
+ /// let d: Vec<_> = x.split(' ').collect();
+ ///
+ /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]);
+ /// ```
+ ///
+ /// It does _not_ give you:
+ ///
+ /// ```,ignore
+ /// assert_eq!(d, &["a", "b", "c"]);
+ /// ```
+ ///
+ /// Use [`split_whitespace`] for this behavior.
+ ///
+ /// [`split_whitespace`]: str::split_whitespace
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> {
+     // Trailing empty substrings are kept; that is what distinguishes
+     // `split` from `split_terminator`.
+     let internal = SplitInternal {
+         matcher: pat.into_searcher(self),
+         start: 0,
+         end: self.len(),
+         finished: false,
+         allow_trailing_empty: true,
+     };
+     Split(internal)
+ }
+
+ /// An iterator over substrings of this string slice, separated by
+ /// characters matched by a pattern. Differs from the iterator produced by
+ /// `split` in that `split_inclusive` leaves the matched part as the
+ /// terminator of the substring.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(split_inclusive)]
+ /// let v: Vec<&str> = "Mary had a little lamb\nlittle lamb\nlittle lamb."
+ /// .split_inclusive('\n').collect();
+ /// assert_eq!(v, ["Mary had a little lamb\n", "little lamb\n", "little lamb."]);
+ /// ```
+ ///
+ /// If the last element of the string is matched,
+ /// that element will be considered the terminator of the preceding substring.
+ /// That substring will be the last item returned by the iterator.
+ ///
+ /// ```
+ /// #![feature(split_inclusive)]
+ /// let v: Vec<&str> = "Mary had a little lamb\nlittle lamb\nlittle lamb.\n"
+ /// .split_inclusive('\n').collect();
+ /// assert_eq!(v, ["Mary had a little lamb\n", "little lamb\n", "little lamb.\n"]);
+ /// ```
+ #[unstable(feature = "split_inclusive", issue = "72360")]
+ #[inline]
+ pub fn split_inclusive<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitInclusive<'a, P> {
+     // Like `split`, but each separator stays attached to the preceding
+     // substring, so no trailing empty piece is ever produced.
+     let internal = SplitInternal {
+         matcher: pat.into_searcher(self),
+         start: 0,
+         end: self.len(),
+         finished: false,
+         allow_trailing_empty: false,
+     };
+     SplitInclusive(internal)
+ }
+
+ /// An iterator over substrings of the given string slice, separated by
+ /// characters matched by a pattern and yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`split`] method can be used.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".rsplit(' ').collect();
+ /// assert_eq!(v, ["lamb", "little", "a", "had", "Mary"]);
+ ///
+ /// let v: Vec<&str> = "".rsplit('X').collect();
+ /// assert_eq!(v, [""]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".rsplit('X').collect();
+ /// assert_eq!(v, ["leopard", "tiger", "", "lion"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".rsplit("::").collect();
+ /// assert_eq!(v, ["leopard", "tiger", "lion"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".rsplit(|c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["ghi", "def", "abc"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplit<'a, P>(&'a self, pat: P) -> RSplit<'a, P>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Reuse the forward iterator's internals; `RSplit` walks them backwards.
+     let Split(internal) = self.split(pat);
+     RSplit(internal)
+ }
+
+ /// An iterator over substrings of the given string slice, separated by
+ /// characters matched by a pattern.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// Equivalent to [`split`], except that the trailing substring
+ /// is skipped if empty.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// This method can be used for string data that is _terminated_,
+ /// rather than _separated_ by a pattern.
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rsplit_terminator`] method can be used.
+ ///
+ /// [`rsplit_terminator`]: str::rsplit_terminator
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "A.B.".split_terminator('.').collect();
+ /// assert_eq!(v, ["A", "B"]);
+ ///
+ /// let v: Vec<&str> = "A..B..".split_terminator(".").collect();
+ /// assert_eq!(v, ["A", "", "B", ""]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> {
+     // Identical configuration to `split`, except that a trailing empty
+     // substring is suppressed.
+     SplitTerminator(SplitInternal {
+         start: 0,
+         end: self.len(),
+         matcher: pat.into_searcher(self),
+         allow_trailing_empty: false,
+         finished: false,
+     })
+ }
+
+ /// An iterator over substrings of `self`, separated by characters
+ /// matched by a pattern and yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// Equivalent to [`split`], except that the trailing substring is
+ /// skipped if empty.
+ ///
+ /// [`split`]: str::split
+ ///
+ /// This method can be used for string data that is _terminated_,
+ /// rather than _separated_ by a pattern.
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a
+ /// reverse search, and it will be double ended if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`split_terminator`] method can be
+ /// used.
+ ///
+ /// [`split_terminator`]: str::split_terminator
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v: Vec<&str> = "A.B.".rsplit_terminator('.').collect();
+ /// assert_eq!(v, ["B", "A"]);
+ ///
+ /// let v: Vec<&str> = "A..B..".rsplit_terminator(".").collect();
+ /// assert_eq!(v, ["", "B", "", "A"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplit_terminator<'a, P>(&'a self, pat: P) -> RSplitTerminator<'a, P>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Reverse counterpart of `split_terminator`, sharing its internals.
+     let SplitTerminator(internal) = self.split_terminator(pat);
+     RSplitTerminator(internal)
+ }
+
+ /// An iterator over substrings of the given string slice, separated by a
+ /// pattern, restricted to returning at most `n` items.
+ ///
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will not be double ended, because it is
+ /// not efficient to support.
+ ///
+ /// If the pattern allows a reverse search, the [`rsplitn`] method can be
+ /// used.
+ ///
+ /// [`rsplitn`]: str::rsplitn
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lambda".splitn(3, ' ').collect();
+ /// assert_eq!(v, ["Mary", "had", "a little lambda"]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(3, "X").collect();
+ /// assert_eq!(v, ["lion", "", "tigerXleopard"]);
+ ///
+ /// let v: Vec<&str> = "abcXdef".splitn(1, 'X').collect();
+ /// assert_eq!(v, ["abcXdef"]);
+ ///
+ /// let v: Vec<&str> = "".splitn(1, 'X').collect();
+ /// assert_eq!(v, [""]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".splitn(2, |c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["abc", "defXghi"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> {
+     // A `split` iterator capped at `n` pieces; the final piece carries the
+     // unsearched remainder of the string.
+     let Split(inner) = self.split(pat);
+     SplitN(SplitNInternal { count: n, iter: inner })
+ }
+
+ /// An iterator over substrings of this string slice, separated by a
+ /// pattern, starting from the end of the string, restricted to returning
+ /// at most `n` items.
+ ///
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will not be double ended, because it is not
+ /// efficient to support.
+ ///
+ /// For splitting from the front, the [`splitn`] method can be used.
+ ///
+ /// [`splitn`]: str::splitn
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(3, ' ').collect();
+ /// assert_eq!(v, ["lamb", "little", "Mary had a"]);
+ ///
+ /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(3, 'X').collect();
+ /// assert_eq!(v, ["leopard", "tiger", "lionX"]);
+ ///
+ /// let v: Vec<&str> = "lion::tiger::leopard".rsplitn(2, "::").collect();
+ /// assert_eq!(v, ["leopard", "lion::tiger"]);
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abc1defXghi".rsplitn(2, |c| c == '1' || c == 'X').collect();
+ /// assert_eq!(v, ["ghi", "abc1def"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn rsplitn<'a, P>(&'a self, n: usize, pat: P) -> RSplitN<'a, P>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Same bookkeeping as `splitn`, consumed from the back of the string.
+     let SplitN(inner) = self.splitn(n, pat);
+     RSplitN(inner)
+ }
+
+ /// Splits the string on the first occurrence of the specified delimiter and
+ /// returns prefix before delimiter and suffix after delimiter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_once)]
+ ///
+ /// assert_eq!("cfg".split_once('='), None);
+ /// assert_eq!("cfg=foo".split_once('='), Some(("cfg", "foo")));
+ /// assert_eq!("cfg=foo=bar".split_once('='), Some(("cfg", "foo=bar")));
+ /// ```
+ #[unstable(feature = "str_split_once", reason = "newly added", issue = "74773")]
+ #[inline]
+ pub fn split_once<'a, P: Pattern<'a>>(&'a self, delimiter: P) -> Option<(&'a str, &'a str)> {
+     // The first match of `delimiter` (if any) splits the string; the
+     // matched delimiter itself is dropped from both halves.
+     delimiter
+         .into_searcher(self)
+         .next_match()
+         .map(|(begin, end)| (&self[..begin], &self[end..]))
+ }
+
+ /// Splits the string on the last occurrence of the specified delimiter and
+ /// returns prefix before delimiter and suffix after delimiter.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(str_split_once)]
+ ///
+ /// assert_eq!("cfg".rsplit_once('='), None);
+ /// assert_eq!("cfg=foo".rsplit_once('='), Some(("cfg", "foo")));
+ /// assert_eq!("cfg=foo=bar".rsplit_once('='), Some(("cfg=foo", "bar")));
+ /// ```
+ #[unstable(feature = "str_split_once", reason = "newly added", issue = "74773")]
+ #[inline]
+ pub fn rsplit_once<'a, P>(&'a self, delimiter: P) -> Option<(&'a str, &'a str)>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Same as `split_once`, but splitting on the last occurrence of the
+     // delimiter instead of the first.
+     delimiter
+         .into_searcher(self)
+         .next_match_back()
+         .map(|(begin, end)| (&self[..begin], &self[end..]))
+ }
+
+ /// An iterator over the disjoint matches of a pattern within the given string
+ /// slice.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rmatches`] method can be used.
+ ///
+ /// [`rmatches`]: str::rmatches
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect();
+ /// assert_eq!(v, ["abc", "abc", "abc"]);
+ ///
+ /// let v: Vec<&str> = "1abc2abc3".matches(char::is_numeric).collect();
+ /// assert_eq!(v, ["1", "2", "3"]);
+ /// ```
+ #[stable(feature = "str_matches", since = "1.2.0")]
+ #[inline]
+ pub fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> {
+     // Wrap the pattern's searcher; the iterator yields each disjoint match.
+     let searcher = pat.into_searcher(self);
+     Matches(MatchesInternal(searcher))
+ }
+
+ /// An iterator over the disjoint matches of a pattern within this string slice,
+ /// yielded in reverse order.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`matches`] method can be used.
+ ///
+ /// [`matches`]: str::matches
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect();
+ /// assert_eq!(v, ["abc", "abc", "abc"]);
+ ///
+ /// let v: Vec<&str> = "1abc2abc3".rmatches(char::is_numeric).collect();
+ /// assert_eq!(v, ["3", "2", "1"]);
+ /// ```
+ #[stable(feature = "str_matches", since = "1.2.0")]
+ #[inline]
+ pub fn rmatches<'a, P>(&'a self, pat: P) -> RMatches<'a, P>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Reuse the forward iterator's internals; `RMatches` walks them backwards.
+     let Matches(internal) = self.matches(pat);
+     RMatches(internal)
+ }
+
+ /// An iterator over the disjoint matches of a pattern within this string
+ /// slice as well as the index that the match starts at.
+ ///
+ /// For matches of `pat` within `self` that overlap, only the indices
+ /// corresponding to the first match are returned.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern
+ /// allows a reverse search and forward/reverse search yields the same
+ /// elements. This is true for, e.g., [`char`], but not for `&str`.
+ ///
+ /// If the pattern allows a reverse search but its results might differ
+ /// from a forward search, the [`rmatch_indices`] method can be used.
+ ///
+ /// [`rmatch_indices`]: str::rmatch_indices
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect();
+ /// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]);
+ ///
+ /// let v: Vec<_> = "1abcabc2".match_indices("abc").collect();
+ /// assert_eq!(v, [(1, "abc"), (4, "abc")]);
+ ///
+ /// let v: Vec<_> = "ababa".match_indices("aba").collect();
+ /// assert_eq!(v, [(0, "aba")]); // only the first `aba`
+ /// ```
+ #[stable(feature = "str_match_indices", since = "1.5.0")]
+ #[inline]
+ pub fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> {
+     // Wrap the pattern's searcher; the iterator yields `(start, match)`
+     // pairs for each disjoint match.
+     let searcher = pat.into_searcher(self);
+     MatchIndices(MatchIndicesInternal(searcher))
+ }
+
+ /// An iterator over the disjoint matches of a pattern within `self`,
+ /// yielded in reverse order along with the index of the match.
+ ///
+ /// For matches of `pat` within `self` that overlap, only the indices
+ /// corresponding to the last match are returned.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Iterator behavior
+ ///
+ /// The returned iterator requires that the pattern supports a reverse
+ /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse
+ /// search yields the same elements.
+ ///
+ /// For iterating from the front, the [`match_indices`] method can be used.
+ ///
+ /// [`match_indices`]: str::match_indices
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect();
+ /// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]);
+ ///
+ /// let v: Vec<_> = "1abcabc2".rmatch_indices("abc").collect();
+ /// assert_eq!(v, [(4, "abc"), (1, "abc")]);
+ ///
+ /// let v: Vec<_> = "ababa".rmatch_indices("aba").collect();
+ /// assert_eq!(v, [(2, "aba")]); // only the last `aba`
+ /// ```
+ #[stable(feature = "str_match_indices", since = "1.5.0")]
+ #[inline]
+ pub fn rmatch_indices<'a, P>(&'a self, pat: P) -> RMatchIndices<'a, P>
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Reverse counterpart of `match_indices`, sharing its internals.
+     let MatchIndices(internal) = self.match_indices(pat);
+     RMatchIndices(internal)
+ }
+
+ /// Returns a string slice with leading and trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ ///
+ /// assert_eq!("Hello\tworld", s.trim());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a slice, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn trim(&self) -> &str {
+     // Strip characters with the Unicode `White_Space` property from both ends.
+     self.trim_matches(char::is_whitespace)
+ }
+
+ /// Returns a string slice with leading whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `start` in this context means the first
+ /// position of that byte string; for a left-to-right language like English or
+ /// Russian, this will be left side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the right side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ /// assert_eq!("Hello\tworld\t", s.trim_start());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English ";
+ /// assert!(Some('E') == s.trim_start().chars().next());
+ ///
+ /// let s = " עברית ";
+ /// assert!(Some('ע') == s.trim_start().chars().next());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_start(&self) -> &str {
+     // Strip Unicode `White_Space` characters from the front only.
+     self.trim_start_matches(char::is_whitespace)
+ }
+
+ /// Returns a string slice with trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `end` in this context means the last
+ /// position of that byte string; for a left-to-right language like English or
+ /// Russian, this will be right side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the left side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ /// assert_eq!(" Hello\tworld", s.trim_end());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English ";
+ /// assert!(Some('h') == s.trim_end().chars().rev().next());
+ ///
+ /// let s = " עברית ";
+ /// assert!(Some('ת') == s.trim_end().chars().rev().next());
+ /// ```
+ #[inline]
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_end(&self) -> &str {
+     // Strip Unicode `White_Space` characters from the back only.
+     self.trim_end_matches(char::is_whitespace)
+ }
+
+ /// Returns a string slice with leading whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Left' in this context means the first
+ /// position of that byte string; for a language like Arabic or Hebrew
+ /// which are 'right to left' rather than 'left to right', this will be
+ /// the _right_ side, not the left.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ ///
+ /// assert_eq!("Hello\tworld\t", s.trim_left());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = " English";
+ /// assert!(Some('E') == s.trim_left().chars().next());
+ ///
+ /// let s = " עברית";
+ /// assert!(Some('ע') == s.trim_left().chars().next());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(
+ since = "1.33.0",
+ reason = "superseded by `trim_start`",
+ suggestion = "trim_start"
+ )]
+ // Deprecated alias kept for backward compatibility; delegates to `trim_start`.
+ pub fn trim_left(&self) -> &str {
+ self.trim_start()
+ }
+
+ /// Returns a string slice with trailing whitespace removed.
+ ///
+ /// 'Whitespace' is defined according to the terms of the Unicode Derived
+ /// Core Property `White_Space`.
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Right' in this context means the last
+ /// position of that byte string; for a language like Arabic or Hebrew
+ /// which are 'right to left' rather than 'left to right', this will be
+ /// the _left_ side, not the right.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = " Hello\tworld\t";
+ ///
+ /// assert_eq!(" Hello\tworld", s.trim_right());
+ /// ```
+ ///
+ /// Directionality:
+ ///
+ /// ```
+ /// let s = "English ";
+ /// assert!(Some('h') == s.trim_right().chars().rev().next());
+ ///
+ /// let s = "עברית ";
+ /// assert!(Some('ת') == s.trim_right().chars().rev().next());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(
+ since = "1.33.0",
+ reason = "superseded by `trim_end`",
+ suggestion = "trim_end"
+ )]
+ // Deprecated alias kept for backward compatibility; delegates to `trim_end`.
+ pub fn trim_right(&self) -> &str {
+ self.trim_end()
+ }
+
+ /// Returns a string slice with all prefixes and suffixes that match a
+ /// pattern repeatedly removed.
+ ///
+ /// The [pattern] can be a [`char`], a slice of [`char`]s, or a function
+ /// or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
+ /// assert_eq!("123foo1bar123".trim_matches(char::is_numeric), "foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn trim_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+     P: Pattern<'a, Searcher: DoubleEndedSearcher<'a>>,
+ {
+     let mut matcher = pat.into_searcher(self);
+     let (mut start, mut end) = (0, 0);
+     // First non-matching position from the front. Its end doubles as a
+     // provisional right edge in case the backward scan finds nothing more.
+     if let Some((first, after)) = matcher.next_reject() {
+         start = first;
+         end = after;
+     }
+     // Last non-matching position from the back fixes the real right edge.
+     if let Some((_, after)) = matcher.next_reject_back() {
+         end = after;
+     }
+     // SAFETY: `Searcher` is known to return valid indices.
+     unsafe { self.get_unchecked(start..end) }
+ }
+
+ /// Returns a string slice with all prefixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `start` in this context means the first
+ /// position of that byte string; for a left-to-right language like English or
+ /// Russian, this will be left side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the right side.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_start_matches('1'), "foo1bar11");
+ /// assert_eq!("123foo1bar123".trim_start_matches(char::is_numeric), "foo1bar123");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_start_matches(x), "foo1bar12");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_start_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str {
+     // Everything before the first non-matching position is trimmed; when
+     // the entire string matches, the empty tail remains.
+     let mut start = self.len();
+     let mut matcher = pat.into_searcher(self);
+     if let Some((reject_start, _)) = matcher.next_reject() {
+         start = reject_start;
+     }
+     // SAFETY: `Searcher` is known to return valid indices.
+     unsafe { self.get_unchecked(start..self.len()) }
+ }
+
+ /// Returns a string slice with the prefix removed.
+ ///
+ /// If the string starts with the pattern `prefix`, returns substring after the prefix, wrapped
+ /// in `Some`. Unlike `trim_start_matches`, this method removes the prefix exactly once.
+ ///
+ /// If the string does not start with `prefix`, returns `None`.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("foo:bar".strip_prefix("foo:"), Some("bar"));
+ /// assert_eq!("foo:bar".strip_prefix("bar"), None);
+ /// assert_eq!("foofoo".strip_prefix("foo"), Some("foo"));
+ /// ```
+ #[must_use = "this returns the remaining substring as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "str_strip", since = "1.45.0")]
+ pub fn strip_prefix<'a, P: Pattern<'a>>(&'a self, prefix: P) -> Option<&'a str> {
+     // Delegated to the pattern, which knows how to match itself at the front.
+     Pattern::strip_prefix_of(prefix, self)
+ }
+
+ /// Returns a string slice with the suffix removed.
+ ///
+ /// If the string ends with the pattern `suffix`, returns the substring before the suffix,
+ /// wrapped in `Some`. Unlike `trim_end_matches`, this method removes the suffix exactly once.
+ ///
+ /// If the string does not end with `suffix`, returns `None`.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!("bar:foo".strip_suffix(":foo"), Some("bar"));
+ /// assert_eq!("bar:foo".strip_suffix("bar"), None);
+ /// assert_eq!("foofoo".strip_suffix("foo"), Some("foo"));
+ /// ```
+ #[must_use = "this returns the remaining substring as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "str_strip", since = "1.45.0")]
+ pub fn strip_suffix<'a, P>(&'a self, suffix: P) -> Option<&'a str>
+ where
+     // Consistency: use the same associated-type-bound form as every
+     // sibling method (`rfind`, `rsplit`, `rsplit_once`, ...) instead of
+     // the desugared `<P as Pattern<'a>>::Searcher: ReverseSearcher<'a>`.
+     // The two spellings are exactly equivalent.
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Delegated to the pattern, which knows how to match itself at the back.
+     suffix.strip_suffix_of(self)
+ }
+
+ /// Returns a string slice with all suffixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. `end` in this context means the last
+ /// position of that byte string; for a left-to-right language like English or
+ /// Russian, this will be right side, and for right-to-left languages like
+ /// Arabic or Hebrew, this will be the left side.
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_end_matches('1'), "11foo1bar");
+ /// assert_eq!("123foo1bar123".trim_end_matches(char::is_numeric), "123foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_end_matches(x), "12foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1fooX".trim_end_matches(|c| c == '1' || c == 'X'), "1foo");
+ /// ```
+ #[must_use = "this returns the trimmed string as a new slice, \
+ without modifying the original"]
+ #[stable(feature = "trim_direction", since = "1.30.0")]
+ pub fn trim_end_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Everything after the last non-matching position is trimmed; when
+     // the entire string matches, the empty prefix remains.
+     let mut end = 0;
+     let mut matcher = pat.into_searcher(self);
+     if let Some((_, reject_end)) = matcher.next_reject_back() {
+         end = reject_end;
+     }
+     // SAFETY: `Searcher` is known to return valid indices.
+     unsafe { self.get_unchecked(0..end) }
+ }
+
+ /// Returns a string slice with all prefixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Left' in this context means the first
+ /// position of that byte string; for a language like Arabic or Hebrew
+ /// which are 'right to left' rather than 'left to right', this will be
+ /// the _right_ side, not the left.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
+ /// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(
+ since = "1.33.0",
+ reason = "superseded by `trim_start_matches`",
+ suggestion = "trim_start_matches"
+ )]
+ // Deprecated alias kept for backward compatibility; delegates to
+ // `trim_start_matches`.
+ pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str {
+ self.trim_start_matches(pat)
+ }
+
+ /// Returns a string slice with all suffixes that match a pattern
+ /// repeatedly removed.
+ ///
+ /// The [pattern] can be a `&str`, [`char`], a slice of [`char`]s, or a
+ /// function or closure that determines if a character matches.
+ ///
+ /// [`char`]: prim@char
+ /// [pattern]: self::pattern
+ ///
+ /// # Text directionality
+ ///
+ /// A string is a sequence of bytes. 'Right' in this context means the last
+ /// position of that byte string; for a language like Arabic or Hebrew
+ /// which are 'right to left' rather than 'left to right', this will be
+ /// the _left_ side, not the right.
+ ///
+ /// # Examples
+ ///
+ /// Simple patterns:
+ ///
+ /// ```
+ /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
+ /// assert_eq!("123foo1bar123".trim_right_matches(char::is_numeric), "123foo1bar");
+ ///
+ /// let x: &[_] = &['1', '2'];
+ /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar");
+ /// ```
+ ///
+ /// A more complex pattern, using a closure:
+ ///
+ /// ```
+ /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(
+     since = "1.33.0",
+     reason = "superseded by `trim_end_matches`",
+     suggestion = "trim_end_matches"
+ )]
+ pub fn trim_right_matches<'a, P>(&'a self, pat: P) -> &'a str
+ where
+     P: Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ {
+     // Deprecated alias: forwards unchanged to the direction-neutral name.
+     self.trim_end_matches(pat)
+ }
+
+ /// Parses this string slice into another type.
+ ///
+ /// Because `parse` is so general, it can cause problems with type
+ /// inference. As such, `parse` is one of the few times you'll see
+ /// the syntax affectionately known as the 'turbofish': `::<>`. This
+ /// helps the inference algorithm understand specifically which type
+ /// you're trying to parse into.
+ ///
+ /// `parse` can parse any type that implements the [`FromStr`] trait.
+ ///
+ /// # Errors
+ ///
+ /// Will return [`Err`] if it's not possible to parse this string slice into
+ /// the desired type.
+ ///
+ /// [`Err`]: FromStr::Err
+ ///
+ /// # Examples
+ ///
+ /// Basic usage
+ ///
+ /// ```
+ /// let four: u32 = "4".parse().unwrap();
+ ///
+ /// assert_eq!(4, four);
+ /// ```
+ ///
+ /// Using the 'turbofish' instead of annotating `four`:
+ ///
+ /// ```
+ /// let four = "4".parse::<u32>();
+ ///
+ /// assert_eq!(Ok(4), four);
+ /// ```
+ ///
+ /// Failing to parse:
+ ///
+ /// ```
+ /// let nope = "j".parse::<u32>();
+ ///
+ /// assert!(nope.is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn parse<F: FromStr>(&self) -> Result<F, F::Err> {
+     // Thin wrapper: the target type's `FromStr` impl does all the work.
+     FromStr::from_str(self)
+ }
+
+ /// Checks if all characters in this string are within the ASCII range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let ascii = "hello!\n";
+ /// let non_ascii = "Grüße, Jürgen ❤";
+ ///
+ /// assert!(ascii.is_ascii());
+ /// assert!(!non_ascii.is_ascii());
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn is_ascii(&self) -> bool {
+     // We can treat each byte as character here: all multibyte characters
+     // start with a byte that is not in the ascii range, so we will stop
+     // there already. Delegates to the `[u8]` implementation.
+     self.as_bytes().is_ascii()
+ }
+
+ /// Checks that two strings are an ASCII case-insensitive match.
+ ///
+ /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
+ /// but without allocating and copying temporaries.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert!("Ferris".eq_ignore_ascii_case("FERRIS"));
+ /// assert!("Ferrös".eq_ignore_ascii_case("FERRöS"));
+ /// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS"));
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn eq_ignore_ascii_case(&self, other: &str) -> bool {
+     // ASCII case folding operates byte-wise, so comparing raw bytes suffices.
+     self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
+ }
+
+ /// Converts this string to its ASCII upper case equivalent in-place.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new uppercased value without modifying the existing one, use
+ /// [`to_ascii_uppercase`].
+ ///
+ /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("Grüße, Jürgen ❤");
+ ///
+ /// s.make_ascii_uppercase();
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s);
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_uppercase(&mut self) {
+     // SAFETY: safe because we transmute two types with the same layout;
+     // uppercasing only rewrites ASCII bytes, which cannot break the
+     // string's UTF-8 validity.
+     let me = unsafe { self.as_bytes_mut() };
+     me.make_ascii_uppercase()
+ }
+
+ /// Converts this string to its ASCII lower case equivalent in-place.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To return a new lowercased value without modifying the existing one, use
+ /// [`to_ascii_lowercase`].
+ ///
+ /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("GRÜßE, JÜRGEN ❤");
+ ///
+ /// s.make_ascii_lowercase();
+ ///
+ /// assert_eq!("grÜße, jÜrgen ❤", s);
+ /// ```
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn make_ascii_lowercase(&mut self) {
+     // SAFETY: safe because we transmute two types with the same layout;
+     // lowercasing only rewrites ASCII bytes, which cannot break the
+     // string's UTF-8 validity.
+     let me = unsafe { self.as_bytes_mut() };
+     me.make_ascii_lowercase()
+ }
+
+ /// Return an iterator that escapes each char in `self` with [`char::escape_debug`].
+ ///
+ /// Note: only extended grapheme codepoints that begin the string will be
+ /// escaped.
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_debug() {
+ /// print!("{}", c);
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_debug());
+ /// ```
+ ///
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("❤\\n!");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_debug().to_string(), "❤\\n!");
+ /// ```
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_debug(&self) -> EscapeDebug<'_> {
+     let mut chars = self.chars();
+     EscapeDebug {
+         // Only the first char is escaped with `escape_debug_ext(true)`;
+         // all remaining chars go through `CharEscapeDebugContinue`, which
+         // calls `escape_debug_ext(false)` (see its definition below).
+         inner: chars
+             .next()
+             .map(|first| first.escape_debug_ext(true))
+             .into_iter()
+             .flatten()
+             .chain(chars.flat_map(CharEscapeDebugContinue)),
+     }
+ }
+
+ /// Return an iterator that escapes each char in `self` with [`char::escape_default`].
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_default() {
+ /// print!("{}", c);
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_default());
+ /// ```
+ ///
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\u{{2764}}\\n!");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_default().to_string(), "\\u{2764}\\n!");
+ /// ```
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_default(&self) -> EscapeDefault<'_> {
+     // Each char expands independently via `char::escape_default`.
+     EscapeDefault { inner: self.chars().flat_map(CharEscapeDefault) }
+ }
+
+ /// Return an iterator that escapes each char in `self` with [`char::escape_unicode`].
+ ///
+ /// # Examples
+ ///
+ /// As an iterator:
+ ///
+ /// ```
+ /// for c in "❤\n!".escape_unicode() {
+ /// print!("{}", c);
+ /// }
+ /// println!();
+ /// ```
+ ///
+ /// Using `println!` directly:
+ ///
+ /// ```
+ /// println!("{}", "❤\n!".escape_unicode());
+ /// ```
+ ///
+ ///
+ /// Both are equivalent to:
+ ///
+ /// ```
+ /// println!("\\u{{2764}}\\u{{a}}\\u{{21}}");
+ /// ```
+ ///
+ /// Using `to_string`:
+ ///
+ /// ```
+ /// assert_eq!("❤\n!".escape_unicode().to_string(), "\\u{2764}\\u{a}\\u{21}");
+ /// ```
+ #[stable(feature = "str_escape", since = "1.34.0")]
+ pub fn escape_unicode(&self) -> EscapeUnicode<'_> {
+     // Each char expands independently via `char::escape_unicode`.
+     EscapeUnicode { inner: self.chars().flat_map(CharEscapeUnicode) }
+ }
+}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl AsRef<[u8]> for str {
+     /// Views the string as its underlying UTF-8 byte slice.
+     #[inline]
+     fn as_ref(&self) -> &[u8] {
+         self.as_bytes()
+     }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl Default for &str {
+     /// Creates an empty str
+     #[inline]
+     fn default() -> Self {
+         // An empty `'static` literal coerces to any requested lifetime.
+         ""
+     }
+ }
+
+ #[stable(feature = "default_mut_str", since = "1.28.0")]
+ impl Default for &mut str {
+     /// Creates an empty mutable str
+     #[inline]
+     fn default() -> Self {
+         // SAFETY: The empty string is valid UTF-8, and an empty byte slice
+         // offers no way to write invalid UTF-8 through the `&mut str`.
+         unsafe { from_utf8_unchecked_mut(&mut []) }
+     }
+ }
+
+ impl_fn_for_zst! {
+     /// A nameable, cloneable fn type
+     // Strips one trailing `'\r'`, if present, from a line slice.
+     #[derive(Clone)]
+     struct LinesAnyMap impl<'a> Fn = |line: &'a str| -> &'a str {
+         let l = line.len();
+         if l > 0 && line.as_bytes()[l - 1] == b'\r' { &line[0 .. l - 1] }
+         else { line }
+     };
+
+     // Escapes a non-leading char for `escape_debug` (extended-grapheme
+     // handling disabled via the `false` argument).
+     #[derive(Clone)]
+     struct CharEscapeDebugContinue impl Fn = |c: char| -> char::EscapeDebug {
+         c.escape_debug_ext(false)
+     };
+
+     #[derive(Clone)]
+     struct CharEscapeUnicode impl Fn = |c: char| -> char::EscapeUnicode {
+         c.escape_unicode()
+     };
+
+     #[derive(Clone)]
+     struct CharEscapeDefault impl Fn = |c: char| -> char::EscapeDefault {
+         c.escape_default()
+     };
+
+     #[derive(Clone)]
+     struct IsWhitespace impl Fn = |c: char| -> bool {
+         c.is_whitespace()
+     };
+
+     #[derive(Clone)]
+     struct IsAsciiWhitespace impl Fn = |byte: &u8| -> bool {
+         byte.is_ascii_whitespace()
+     };
+
+     #[derive(Clone)]
+     struct IsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b str| -> bool {
+         !s.is_empty()
+     };
+
+     #[derive(Clone)]
+     struct BytesIsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b [u8]| -> bool {
+         !s.is_empty()
+     };
+
+     #[derive(Clone)]
+     struct UnsafeBytesToStr impl<'a> Fn = |bytes: &'a [u8]| -> &'a str {
+         // SAFETY: relies entirely on the caller: this closure type must only
+         // be applied to subslices of a `&str` that were split on char
+         // boundaries, so `bytes` is valid UTF-8. NOTE(review): confirm all
+         // call sites uphold this — there is no local check.
+         unsafe { from_utf8_unchecked(bytes) }
+     };
+ }
--- /dev/null
+//! The string Pattern API.
+//!
+//! The Pattern API provides a generic mechanism for using different pattern
+//! types when searching through a string.
+//!
+//! For more details, see the traits [`Pattern`], [`Searcher`],
+//! [`ReverseSearcher`], and [`DoubleEndedSearcher`].
+//!
+//! Although this API is unstable, it is exposed via stable APIs on the
+//! [`str`] type.
+//!
+//! # Examples
+//!
+//! [`Pattern`] is [implemented][pattern-impls] in the stable API for
+//! [`&str`][`str`], [`char`], slices of [`char`], and functions and closures
+//! implementing `FnMut(char) -> bool`.
+//!
+//! ```
+//! let s = "Can you find a needle in a haystack?";
+//!
+//! // &str pattern
+//! assert_eq!(s.find("you"), Some(4));
+//! // char pattern
+//! assert_eq!(s.find('n'), Some(2));
+//! // slice of chars pattern
+//! assert_eq!(s.find(&['a', 'e', 'i', 'o', 'u'][..]), Some(1));
+//! // closure pattern
+//! assert_eq!(s.find(|c: char| c.is_ascii_punctuation()), Some(35));
+//! ```
+//!
+//! [pattern-impls]: Pattern#implementors
+
+#![unstable(
+ feature = "pattern",
+ reason = "API not fully fleshed out and ready to be stabilized",
+ issue = "27721"
+)]
+
+use crate::cmp;
+use crate::fmt;
+use crate::slice::memchr;
+
+// Pattern
+
+/// A string pattern.
+///
+/// A `Pattern<'a>` expresses that the implementing type
+/// can be used as a string pattern for searching in a [`&'a str`][str].
+///
+/// For example, both `'a'` and `"aa"` are patterns that
+/// would match at index `1` in the string `"baaaab"`.
+///
+/// The trait itself acts as a builder for an associated
+/// [`Searcher`] type, which does the actual work of finding
+/// occurrences of the pattern in a string.
+///
+/// Depending on the type of the pattern, the behaviour of methods like
+/// [`str::find`] and [`str::contains`] can change. The table below describes
+/// some of those behaviours.
+///
+/// | Pattern type | Match condition |
+/// |--------------------------|-------------------------------------------|
+/// | `&str` | is substring |
+/// | `char` | is contained in string |
+/// | `&[char]` | any char in slice is contained in string |
+/// | `F: FnMut(char) -> bool` | `F` returns `true` for a char in string |
+/// | `&&str` | is substring |
+/// | `&String` | is substring |
+///
+/// # Examples
+///
+/// ```
+/// // &str
+/// assert_eq!("abaaa".find("ba"), Some(1));
+/// assert_eq!("abaaa".find("bac"), None);
+///
+/// // char
+/// assert_eq!("abaaa".find('a'), Some(0));
+/// assert_eq!("abaaa".find('b'), Some(1));
+/// assert_eq!("abaaa".find('c'), None);
+///
+/// // &[char]
+/// assert_eq!("ab".find(&['b', 'a'][..]), Some(0));
+/// assert_eq!("abaaa".find(&['a', 'z'][..]), Some(0));
+/// assert_eq!("abaaa".find(&['c', 'd'][..]), None);
+///
+/// // FnMut(char) -> bool
+/// assert_eq!("abcdef_z".find(|ch| ch > 'd' && ch < 'y'), Some(4));
+/// assert_eq!("abcddd_z".find(|ch| ch > 'd' && ch < 'y'), None);
+/// ```
+ pub trait Pattern<'a>: Sized {
+     /// Associated searcher for this pattern
+     type Searcher: Searcher<'a>;
+
+     /// Constructs the associated searcher from
+     /// `self` and the `haystack` to search in.
+     fn into_searcher(self, haystack: &'a str) -> Self::Searcher;
+
+     /// Checks whether the pattern matches anywhere in the haystack
+     #[inline]
+     fn is_contained_in(self, haystack: &'a str) -> bool {
+         self.into_searcher(haystack).next_match().is_some()
+     }
+
+     /// Checks whether the pattern matches at the front of the haystack
+     #[inline]
+     fn is_prefix_of(self, haystack: &'a str) -> bool {
+         // The first search step always starts at index 0, so a prefix
+         // match must appear as `Match(0, _)` in the very first step.
+         matches!(self.into_searcher(haystack).next(), SearchStep::Match(0, _))
+     }
+
+     /// Checks whether the pattern matches at the back of the haystack
+     #[inline]
+     fn is_suffix_of(self, haystack: &'a str) -> bool
+     where
+         Self::Searcher: ReverseSearcher<'a>,
+     {
+         // The first reverse step always ends at `haystack.len()`, so a
+         // suffix match must report that end index in the first step.
+         matches!(self.into_searcher(haystack).next_back(), SearchStep::Match(_, j) if haystack.len() == j)
+     }
+
+     /// Removes the pattern from the front of haystack, if it matches.
+     #[inline]
+     fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+         if let SearchStep::Match(start, len) = self.into_searcher(haystack).next() {
+             debug_assert_eq!(
+                 start, 0,
+                 "The first search step from Searcher \
+                 must include the first character"
+             );
+             // SAFETY: `Searcher` is known to return valid indices.
+             unsafe { Some(haystack.get_unchecked(len..)) }
+         } else {
+             None
+         }
+     }
+
+     /// Removes the pattern from the back of haystack, if it matches.
+     #[inline]
+     fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
+     where
+         Self::Searcher: ReverseSearcher<'a>,
+     {
+         if let SearchStep::Match(start, end) = self.into_searcher(haystack).next_back() {
+             debug_assert_eq!(
+                 end,
+                 haystack.len(),
+                 "The first search step from ReverseSearcher \
+                 must include the last character"
+             );
+             // SAFETY: `Searcher` is known to return valid indices.
+             unsafe { Some(haystack.get_unchecked(..start)) }
+         } else {
+             None
+         }
+     }
+ }
+
+// Searcher
+
+ /// Result of calling [`Searcher::next()`] or [`ReverseSearcher::next_back()`].
+ ///
+ /// The `usize` pairs are byte offsets into the haystack, lying on UTF-8
+ /// boundaries (see the [`Searcher`] safety contract).
+ #[derive(Copy, Clone, Eq, PartialEq, Debug)]
+ pub enum SearchStep {
+     /// Expresses that a match of the pattern has been found at
+     /// `haystack[a..b]`.
+     Match(usize, usize),
+     /// Expresses that `haystack[a..b]` has been rejected as a possible match
+     /// of the pattern.
+     ///
+     /// Note that there might be more than one `Reject` between two `Match`es,
+     /// there is no requirement for them to be combined into one.
+     Reject(usize, usize),
+     /// Expresses that every byte of the haystack has been visited, ending
+     /// the iteration.
+     Done,
+ }
+
+/// A searcher for a string pattern.
+///
+/// This trait provides methods for searching for non-overlapping
+/// matches of a pattern starting from the front (left) of a string.
+///
+/// It will be implemented by associated `Searcher`
+/// types of the [`Pattern`] trait.
+///
+/// The trait is marked unsafe because the indices returned by the
+/// [`next()`][Searcher::next] methods are required to lie on valid utf8
+/// boundaries in the haystack. This enables consumers of this trait to
+/// slice the haystack without additional runtime checks.
+ pub unsafe trait Searcher<'a> {
+     /// Getter for the underlying string to be searched in
+     ///
+     /// Will always return the same [`&str`][str].
+     fn haystack(&self) -> &'a str;
+
+     /// Performs the next search step starting from the front.
+     ///
+     /// - Returns [`Match(a, b)`][SearchStep::Match] if `haystack[a..b]` matches
+     ///   the pattern.
+     /// - Returns [`Reject(a, b)`][SearchStep::Reject] if `haystack[a..b]` can
+     ///   not match the pattern, even partially.
+     /// - Returns [`Done`][SearchStep::Done] if every byte of the haystack has
+     ///   been visited.
+     ///
+     /// The stream of [`Match`][SearchStep::Match] and
+     /// [`Reject`][SearchStep::Reject] values up to a [`Done`][SearchStep::Done]
+     /// will contain index ranges that are adjacent, non-overlapping,
+     /// covering the whole haystack, and laying on utf8 boundaries.
+     ///
+     /// A [`Match`][SearchStep::Match] result needs to contain the whole matched
+     /// pattern, however [`Reject`][SearchStep::Reject] results may be split up
+     /// into arbitrary many adjacent fragments. Both ranges may have zero length.
+     ///
+     /// As an example, the pattern `"aaa"` and the haystack `"cbaaaaab"`
+     /// might produce the stream
+     /// `[Reject(0, 1), Reject(1, 2), Match(2, 5), Reject(5, 8)]`
+     fn next(&mut self) -> SearchStep;
+
+     /// Finds the next [`Match`][SearchStep::Match] result. See [`next()`][Searcher::next].
+     ///
+     /// Unlike [`next()`][Searcher::next], there is no guarantee that the returned ranges
+     /// of this and [`next_reject`][Searcher::next_reject] will overlap. This will return
+     /// `(start_match, end_match)`, where start_match is the index of where
+     /// the match begins, and end_match is the index after the end of the match.
+     #[inline]
+     fn next_match(&mut self) -> Option<(usize, usize)> {
+         loop {
+             match self.next() {
+                 SearchStep::Match(a, b) => return Some((a, b)),
+                 SearchStep::Done => return None,
+                 // Skip intermediate `Reject` fragments.
+                 _ => continue,
+             }
+         }
+     }
+
+     /// Finds the next [`Reject`][SearchStep::Reject] result. See [`next()`][Searcher::next]
+     /// and [`next_match()`][Searcher::next_match].
+     ///
+     /// Unlike [`next()`][Searcher::next], there is no guarantee that the returned ranges
+     /// of this and [`next_match`][Searcher::next_match] will overlap.
+     #[inline]
+     fn next_reject(&mut self) -> Option<(usize, usize)> {
+         loop {
+             match self.next() {
+                 SearchStep::Reject(a, b) => return Some((a, b)),
+                 SearchStep::Done => return None,
+                 // Skip intermediate `Match` fragments.
+                 _ => continue,
+             }
+         }
+     }
+ }
+
+/// A reverse searcher for a string pattern.
+///
+/// This trait provides methods for searching for non-overlapping
+/// matches of a pattern starting from the back (right) of a string.
+///
+/// It will be implemented by associated [`Searcher`]
+/// types of the [`Pattern`] trait if the pattern supports searching
+/// for it from the back.
+///
+/// The index ranges returned by this trait are not required
+/// to exactly match those of the forward search in reverse.
+///
+ /// For the reason why this trait is marked unsafe, see the
+ /// parent trait [`Searcher`].
+ pub unsafe trait ReverseSearcher<'a>: Searcher<'a> {
+     /// Performs the next search step starting from the back.
+     ///
+     /// - Returns [`Match(a, b)`][SearchStep::Match] if `haystack[a..b]`
+     ///   matches the pattern.
+     /// - Returns [`Reject(a, b)`][SearchStep::Reject] if `haystack[a..b]`
+     ///   can not match the pattern, even partially.
+     /// - Returns [`Done`][SearchStep::Done] if every byte of the haystack
+     ///   has been visited
+     ///
+     /// The stream of [`Match`][SearchStep::Match] and
+     /// [`Reject`][SearchStep::Reject] values up to a [`Done`][SearchStep::Done]
+     /// will contain index ranges that are adjacent, non-overlapping,
+     /// covering the whole haystack, and laying on utf8 boundaries.
+     ///
+     /// A [`Match`][SearchStep::Match] result needs to contain the whole matched
+     /// pattern, however [`Reject`][SearchStep::Reject] results may be split up
+     /// into arbitrary many adjacent fragments. Both ranges may have zero length.
+     ///
+     /// As an example, the pattern `"aaa"` and the haystack `"cbaaaaab"`
+     /// might produce the stream
+     /// `[Reject(7, 8), Match(4, 7), Reject(1, 4), Reject(0, 1)]`.
+     fn next_back(&mut self) -> SearchStep;
+
+     /// Finds the next [`Match`][SearchStep::Match] result.
+     /// See [`next_back()`][ReverseSearcher::next_back].
+     #[inline]
+     fn next_match_back(&mut self) -> Option<(usize, usize)> {
+         loop {
+             match self.next_back() {
+                 SearchStep::Match(a, b) => return Some((a, b)),
+                 SearchStep::Done => return None,
+                 // Skip intermediate `Reject` fragments.
+                 _ => continue,
+             }
+         }
+     }
+
+     /// Finds the next [`Reject`][SearchStep::Reject] result.
+     /// See [`next_back()`][ReverseSearcher::next_back].
+     #[inline]
+     fn next_reject_back(&mut self) -> Option<(usize, usize)> {
+         loop {
+             match self.next_back() {
+                 SearchStep::Reject(a, b) => return Some((a, b)),
+                 SearchStep::Done => return None,
+                 // Skip intermediate `Match` fragments.
+                 _ => continue,
+             }
+         }
+     }
+ }
+
+/// A marker trait to express that a [`ReverseSearcher`]
+/// can be used for a [`DoubleEndedIterator`] implementation.
+///
+/// For this, the impl of [`Searcher`] and [`ReverseSearcher`] need
+/// to follow these conditions:
+///
+/// - All results of `next()` need to be identical
+/// to the results of `next_back()` in reverse order.
+/// - `next()` and `next_back()` need to behave as
+/// the two ends of a range of values, that is they
+/// can not "walk past each other".
+///
+/// # Examples
+///
+/// `char::Searcher` is a `DoubleEndedSearcher` because searching for a
+/// [`char`] only requires looking at one at a time, which behaves the same
+/// from both ends.
+///
+/// `(&str)::Searcher` is not a `DoubleEndedSearcher` because
+/// the pattern `"aa"` in the haystack `"aaa"` matches as either
+/// `"[aa]a"` or `"a[aa]"`, depending from which side it is searched.
+ pub trait DoubleEndedSearcher<'a>: ReverseSearcher<'a> {} // marker trait only: adds no methods
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for char
+/////////////////////////////////////////////////////////////////////////////
+
+ /// Associated type for `<char as Pattern<'a>>::Searcher`.
+ #[derive(Clone, Debug)]
+ pub struct CharSearcher<'a> {
+     haystack: &'a str,
+     // safety invariant: `finger`/`finger_back` must be a valid utf8 byte index of `haystack`
+     // This invariant can be broken *within* next_match and next_match_back, however
+     // they must exit with fingers on valid code point boundaries.
+     /// `finger` is the current byte index of the forward search.
+     /// Imagine that it exists before the byte at its index, i.e.
+     /// `haystack[finger]` is the first byte of the slice we must inspect during
+     /// forward searching
+     finger: usize,
+     /// `finger_back` is the current byte index of the reverse search.
+     /// Imagine that it exists after the byte at its index, i.e.
+     /// `haystack[finger_back - 1]` is the last byte of the slice we must inspect during
+     /// forward searching (and thus the first byte to be inspected when calling next_back()).
+     finger_back: usize,
+     /// The character being searched for
+     needle: char,
+
+     // safety invariant: `utf8_size` must be less than 5 (and is at least 1,
+     // since it is the length returned by `char::encode_utf8`)
+     /// The number of bytes `needle` takes up when encoded in utf8.
+     utf8_size: usize,
+     /// A utf8 encoded copy of the `needle`
+     utf8_encoded: [u8; 4],
+ }
+
+ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> {
+     #[inline]
+     fn haystack(&self) -> &'a str {
+         self.haystack
+     }
+     #[inline]
+     fn next(&mut self) -> SearchStep {
+         let old_finger = self.finger;
+         // SAFETY: 1-4 guarantee safety of `get_unchecked`
+         // 1. `self.finger` and `self.finger_back` are kept on unicode boundaries
+         //    (this is invariant)
+         // 2. `self.finger >= 0` since it starts at 0 and only increases
+         // 3. `self.finger < self.finger_back` because otherwise the char `iter`
+         //    would return `SearchStep::Done`
+         // 4. `self.finger` comes before the end of the haystack because `self.finger_back`
+         //    starts at the end and only decreases
+         let slice = unsafe { self.haystack.get_unchecked(old_finger..self.finger_back) };
+         let mut iter = slice.chars();
+         let old_len = iter.iter.len();
+         if let Some(ch) = iter.next() {
+             // add byte offset of current character
+             // without re-encoding as utf-8
+             // (the shrinkage of the underlying byte iterator is the char's width)
+             self.finger += old_len - iter.iter.len();
+             if ch == self.needle {
+                 SearchStep::Match(old_finger, self.finger)
+             } else {
+                 SearchStep::Reject(old_finger, self.finger)
+             }
+         } else {
+             SearchStep::Done
+         }
+     }
+     #[inline]
+     fn next_match(&mut self) -> Option<(usize, usize)> {
+         loop {
+             // get the haystack after the last character found
+             let bytes = self.haystack.as_bytes().get(self.finger..self.finger_back)?;
+             // the last byte of the utf8 encoded needle
+             // SAFETY: we have an invariant that `1 <= utf8_size < 5`, so
+             // `utf8_size - 1` indexes into the 4-byte buffer
+             let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) };
+             if let Some(index) = memchr::memchr(last_byte, bytes) {
+                 // The new finger is the index of the byte we found,
+                 // plus one, since we memchr'd for the last byte of the character.
+                 //
+                 // Note that this doesn't always give us a finger on a UTF8 boundary.
+                 // If we *didn't* find our character
+                 // we may have indexed to the non-last byte of a 3-byte or 4-byte character.
+                 // We can't just skip to the next valid starting byte because a character like
+                 // ꁁ (U+A041 YI SYLLABLE PA), utf-8 `EA 81 81` will have us always find
+                 // the second byte when searching for the third.
+                 //
+                 // However, this is totally okay. While we have the invariant that
+                 // self.finger is on a UTF8 boundary, this invariant is not relied upon
+                 // within this method (it is relied upon in CharSearcher::next()).
+                 //
+                 // We only exit this method when we reach the end of the string, or if we
+                 // find something. When we find something the `finger` will be set
+                 // to a UTF8 boundary.
+                 self.finger += index + 1;
+                 if self.finger >= self.utf8_size {
+                     let found_char = self.finger - self.utf8_size;
+                     if let Some(slice) = self.haystack.as_bytes().get(found_char..self.finger) {
+                         if slice == &self.utf8_encoded[0..self.utf8_size] {
+                             return Some((found_char, self.finger));
+                         }
+                     }
+                 }
+             } else {
+                 // found nothing, exit
+                 self.finger = self.finger_back;
+                 return None;
+             }
+         }
+     }
+
+     // let next_reject use the default implementation from the Searcher trait
+ }
+
+ unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> {
+     #[inline]
+     fn next_back(&mut self) -> SearchStep {
+         let old_finger = self.finger_back;
+         // SAFETY: see the comment for next() above
+         let slice = unsafe { self.haystack.get_unchecked(self.finger..old_finger) };
+         let mut iter = slice.chars();
+         let old_len = iter.iter.len();
+         if let Some(ch) = iter.next_back() {
+             // subtract byte offset of current character
+             // without re-encoding as utf-8
+             // (the shrinkage of the underlying byte iterator is the char's width)
+             self.finger_back -= old_len - iter.iter.len();
+             if ch == self.needle {
+                 SearchStep::Match(self.finger_back, old_finger)
+             } else {
+                 SearchStep::Reject(self.finger_back, old_finger)
+             }
+         } else {
+             SearchStep::Done
+         }
+     }
+     #[inline]
+     fn next_match_back(&mut self) -> Option<(usize, usize)> {
+         let haystack = self.haystack.as_bytes();
+         loop {
+             // get the haystack up to but not including the last character searched
+             let bytes = haystack.get(self.finger..self.finger_back)?;
+             // the last byte of the utf8 encoded needle
+             // SAFETY: we have an invariant that `1 <= utf8_size < 5`, so
+             // `utf8_size - 1` indexes into the 4-byte buffer
+             let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) };
+             if let Some(index) = memchr::memrchr(last_byte, bytes) {
+                 // we searched a slice that was offset by self.finger,
+                 // add self.finger to recoup the original index
+                 let index = self.finger + index;
+                 // memrchr will return the index of the byte we wish to
+                 // find. In case of an ASCII character, this is indeed
+                 // where we wish our new finger to be ("after" the found
+                 // char in the paradigm of reverse iteration). For
+                 // multibyte chars we need to skip down by the number of more
+                 // bytes they have than ASCII
+                 let shift = self.utf8_size - 1;
+                 if index >= shift {
+                     let found_char = index - shift;
+                     if let Some(slice) = haystack.get(found_char..(found_char + self.utf8_size)) {
+                         if slice == &self.utf8_encoded[0..self.utf8_size] {
+                             // move finger to before the character found (i.e., at its start index)
+                             self.finger_back = found_char;
+                             return Some((self.finger_back, self.finger_back + self.utf8_size));
+                         }
+                     }
+                 }
+                 // We can't use finger_back = index - size + 1 here. If we found the last char
+                 // of a different-sized character (or the middle byte of a different character)
+                 // we need to bump the finger_back down to `index`. This similarly makes
+                 // `finger_back` have the potential to no longer be on a boundary,
+                 // but this is OK since we only exit this function on a boundary
+                 // or when the haystack has been searched completely.
+                 //
+                 // Unlike next_match this does not
+                 // have the problem of repeated bytes in utf-8 because
+                 // we're searching for the last byte, and we can only have
+                 // found the last byte when searching in reverse.
+                 self.finger_back = index;
+             } else {
+                 self.finger_back = self.finger;
+                 // found nothing, exit
+                 return None;
+             }
+         }
+     }
+
+     // let next_reject_back use the default implementation from the Searcher trait
+ }
+
+ impl<'a> DoubleEndedSearcher<'a> for CharSearcher<'a> {} // single-char steps behave identically from both ends
+
+/// Searches for chars that are equal to a given [`char`].
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!("Hello world".find('o'), Some(4));
+/// ```
+ impl<'a> Pattern<'a> for char {
+     type Searcher = CharSearcher<'a>;
+
+     #[inline]
+     fn into_searcher(self, haystack: &'a str) -> Self::Searcher {
+         // Pre-encode the needle once so the searcher can compare raw bytes.
+         let mut utf8_encoded = [0; 4];
+         let utf8_size = self.encode_utf8(&mut utf8_encoded).len();
+         CharSearcher {
+             haystack,
+             finger: 0,
+             finger_back: haystack.len(),
+             needle: self,
+             utf8_size,
+             utf8_encoded,
+         }
+     }
+
+     #[inline]
+     fn is_contained_in(self, haystack: &'a str) -> bool {
+         // Fast path: an ASCII needle is a single byte, so a plain byte
+         // search over the haystack suffices.
+         if (self as u32) < 128 {
+             haystack.as_bytes().contains(&(self as u8))
+         } else {
+             // Otherwise search for the char's UTF-8 encoding as a substring.
+             let mut buffer = [0u8; 4];
+             self.encode_utf8(&mut buffer).is_contained_in(haystack)
+         }
+     }
+
+     #[inline]
+     fn is_prefix_of(self, haystack: &'a str) -> bool {
+         // Delegate to the `&str` pattern impl on the encoded needle.
+         self.encode_utf8(&mut [0u8; 4]).is_prefix_of(haystack)
+     }
+
+     #[inline]
+     fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+         self.encode_utf8(&mut [0u8; 4]).strip_prefix_of(haystack)
+     }
+
+     #[inline]
+     fn is_suffix_of(self, haystack: &'a str) -> bool
+     where
+         Self::Searcher: ReverseSearcher<'a>,
+     {
+         self.encode_utf8(&mut [0u8; 4]).is_suffix_of(haystack)
+     }
+
+     #[inline]
+     fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
+     where
+         Self::Searcher: ReverseSearcher<'a>,
+     {
+         self.encode_utf8(&mut [0u8; 4]).strip_suffix_of(haystack)
+     }
+ }
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for a MultiCharEq wrapper
+/////////////////////////////////////////////////////////////////////////////
+
+ #[doc(hidden)]
+ trait MultiCharEq {
+     /// Returns `true` if `c` is considered a match by this matcher.
+     fn matches(&mut self, c: char) -> bool;
+ }
+
+ impl<F> MultiCharEq for F
+ where
+     F: FnMut(char) -> bool,
+ {
+     #[inline]
+     fn matches(&mut self, c: char) -> bool {
+         // Closure matcher: just invoke it.
+         (*self)(c)
+     }
+ }
+
+ impl MultiCharEq for &[char] {
+     /// Slice matcher: matches any character contained in the slice.
+     #[inline]
+     fn matches(&mut self, c: char) -> bool {
+         self.contains(&c)
+     }
+ }
+
+ /// Adapter turning any [`MultiCharEq`] matcher into a pattern.
+ struct MultiCharEqPattern<C: MultiCharEq>(C);
+
+ /// Searcher produced by [`MultiCharEqPattern`]; walks the haystack one
+ /// `char` at a time in either direction.
+ #[derive(Clone, Debug)]
+ struct MultiCharEqSearcher<'a, C: MultiCharEq> {
+     /// The per-character matcher.
+     char_eq: C,
+     haystack: &'a str,
+     /// Double-ended iterator yielding `(byte index, char)` pairs.
+     char_indices: super::CharIndices<'a>,
+ }
+
+ impl<'a, C: MultiCharEq> Pattern<'a> for MultiCharEqPattern<C> {
+     type Searcher = MultiCharEqSearcher<'a, C>;
+
+     #[inline]
+     fn into_searcher(self, haystack: &'a str) -> MultiCharEqSearcher<'a, C> {
+         // The `CharIndices` iterator drives the search from both ends.
+         MultiCharEqSearcher { haystack, char_eq: self.0, char_indices: haystack.char_indices() }
+     }
+ }
+
+ unsafe impl<'a, C: MultiCharEq> Searcher<'a> for MultiCharEqSearcher<'a, C> {
+     #[inline]
+     fn haystack(&self) -> &'a str {
+         self.haystack
+     }
+
+     #[inline]
+     fn next(&mut self) -> SearchStep {
+         let s = &mut self.char_indices;
+         // Compare lengths of the internal byte slice iterator
+         // to find length of current char
+         // (avoids re-encoding the char just to learn its UTF-8 width)
+         let pre_len = s.iter.iter.len();
+         if let Some((i, c)) = s.next() {
+             let len = s.iter.iter.len();
+             let char_len = pre_len - len;
+             if self.char_eq.matches(c) {
+                 return SearchStep::Match(i, i + char_len);
+             } else {
+                 return SearchStep::Reject(i, i + char_len);
+             }
+         }
+         SearchStep::Done
+     }
+ }
+
+unsafe impl<'a, C: MultiCharEq> ReverseSearcher<'a> for MultiCharEqSearcher<'a, C> {
+ #[inline]
+ fn next_back(&mut self) -> SearchStep {
+ let s = &mut self.char_indices;
+ // Compare lengths of the internal byte slice iterator
+ // to find length of current char
+ let pre_len = s.iter.iter.len();
+ if let Some((i, c)) = s.next_back() {
+ let len = s.iter.iter.len();
+ let char_len = pre_len - len;
+ if self.char_eq.matches(c) {
+ return SearchStep::Match(i, i + char_len);
+ } else {
+ return SearchStep::Reject(i, i + char_len);
+ }
+ }
+ SearchStep::Done
+ }
+}
+
+impl<'a, C: MultiCharEq> DoubleEndedSearcher<'a> for MultiCharEqSearcher<'a, C> {}
+
+/////////////////////////////////////////////////////////////////////////////
+
/// Generates the body of a `Pattern` impl that delegates to another pattern
/// type: `$pmap` converts `self` into the delegate pattern, and `$smap`
/// wraps the delegate's searcher into the concrete searcher type `$t`.
macro_rules! pattern_methods {
    ($t:ty, $pmap:expr, $smap:expr) => {
        type Searcher = $t;

        #[inline]
        fn into_searcher(self, haystack: &'a str) -> $t {
            ($smap)(($pmap)(self).into_searcher(haystack))
        }

        #[inline]
        fn is_contained_in(self, haystack: &'a str) -> bool {
            ($pmap)(self).is_contained_in(haystack)
        }

        #[inline]
        fn is_prefix_of(self, haystack: &'a str) -> bool {
            ($pmap)(self).is_prefix_of(haystack)
        }

        #[inline]
        fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
            ($pmap)(self).strip_prefix_of(haystack)
        }

        // The suffix methods additionally require the generated searcher to
        // support reverse iteration.
        #[inline]
        fn is_suffix_of(self, haystack: &'a str) -> bool
        where
            $t: ReverseSearcher<'a>,
        {
            ($pmap)(self).is_suffix_of(haystack)
        }

        #[inline]
        fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str>
        where
            $t: ReverseSearcher<'a>,
        {
            ($pmap)(self).strip_suffix_of(haystack)
        }
    };
}
+
/// Generates forwarding `Searcher` (`forward`) or `ReverseSearcher`
/// (`reverse`) method bodies for a newtype wrapper whose field `0` is itself
/// a searcher.
macro_rules! searcher_methods {
    (forward) => {
        #[inline]
        fn haystack(&self) -> &'a str {
            self.0.haystack()
        }
        #[inline]
        fn next(&mut self) -> SearchStep {
            self.0.next()
        }
        #[inline]
        fn next_match(&mut self) -> Option<(usize, usize)> {
            self.0.next_match()
        }
        #[inline]
        fn next_reject(&mut self) -> Option<(usize, usize)> {
            self.0.next_reject()
        }
    };
    (reverse) => {
        #[inline]
        fn next_back(&mut self) -> SearchStep {
            self.0.next_back()
        }
        #[inline]
        fn next_match_back(&mut self) -> Option<(usize, usize)> {
            self.0.next_match_back()
        }
        #[inline]
        fn next_reject_back(&mut self) -> Option<(usize, usize)> {
            self.0.next_reject_back()
        }
    };
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &[char]
+/////////////////////////////////////////////////////////////////////////////
+
+// Todo: Change / Remove due to ambiguity in meaning.
+
/// Associated type for `<&[char] as Pattern<'a>>::Searcher`.
#[derive(Clone, Debug)]
pub struct CharSliceSearcher<'a, 'b>(<MultiCharEqPattern<&'b [char]> as Pattern<'a>>::Searcher);

// All searcher methods forward to the wrapped multi-char searcher.
unsafe impl<'a, 'b> Searcher<'a> for CharSliceSearcher<'a, 'b> {
    searcher_methods!(forward);
}

unsafe impl<'a, 'b> ReverseSearcher<'a> for CharSliceSearcher<'a, 'b> {
    searcher_methods!(reverse);
}

impl<'a, 'b> DoubleEndedSearcher<'a> for CharSliceSearcher<'a, 'b> {}

/// Searches for chars that are equal to any of the [`char`]s in the slice.
///
/// # Examples
///
/// ```
/// assert_eq!("Hello world".find(&['l', 'l'] as &[_]), Some(2));
/// assert_eq!("Hello world".find(&['l', 'l'][..]), Some(2));
/// ```
impl<'a, 'b> Pattern<'a> for &'b [char] {
    pattern_methods!(CharSliceSearcher<'a, 'b>, MultiCharEqPattern, CharSliceSearcher);
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for F: FnMut(char) -> bool
+/////////////////////////////////////////////////////////////////////////////
+
/// Associated type for `<F as Pattern<'a>>::Searcher`.
#[derive(Clone)]
pub struct CharPredicateSearcher<'a, F>(<MultiCharEqPattern<F> as Pattern<'a>>::Searcher)
where
    F: FnMut(char) -> bool;

// `Debug` is written by hand because the closure `F` need not be `Debug`;
// only the searcher's position state is printed.
impl<F> fmt::Debug for CharPredicateSearcher<'_, F>
where
    F: FnMut(char) -> bool,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CharPredicateSearcher")
            .field("haystack", &self.0.haystack)
            .field("char_indices", &self.0.char_indices)
            .finish()
    }
}
unsafe impl<'a, F> Searcher<'a> for CharPredicateSearcher<'a, F>
where
    F: FnMut(char) -> bool,
{
    searcher_methods!(forward);
}

unsafe impl<'a, F> ReverseSearcher<'a> for CharPredicateSearcher<'a, F>
where
    F: FnMut(char) -> bool,
{
    searcher_methods!(reverse);
}

impl<'a, F> DoubleEndedSearcher<'a> for CharPredicateSearcher<'a, F> where F: FnMut(char) -> bool {}

/// Searches for [`char`]s that match the given predicate.
///
/// # Examples
///
/// ```
/// assert_eq!("Hello world".find(char::is_uppercase), Some(0));
/// assert_eq!("Hello world".find(|c| "aeiou".contains(c)), Some(1));
/// ```
impl<'a, F> Pattern<'a> for F
where
    F: FnMut(char) -> bool,
{
    pattern_methods!(CharPredicateSearcher<'a, F>, MultiCharEqPattern, CharPredicateSearcher);
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &&str
+/////////////////////////////////////////////////////////////////////////////
+
/// Delegates to the `&str` impl.
impl<'a, 'b, 'c> Pattern<'a> for &'c &'b str {
    // Deref one level (`|&s| s`) and reuse `StrSearcher` unchanged (`|s| s`).
    pattern_methods!(StrSearcher<'a, 'b>, |&s| s, |s| s);
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Impl for &str
+/////////////////////////////////////////////////////////////////////////////
+
/// Non-allocating substring search.
///
/// Will handle the pattern `""` as returning empty matches at each character
/// boundary.
///
/// # Examples
///
/// ```
/// assert_eq!("Hello world".find("world"), Some(6));
/// ```
impl<'a, 'b> Pattern<'a> for &'b str {
    type Searcher = StrSearcher<'a, 'b>;

    #[inline]
    fn into_searcher(self, haystack: &'a str) -> StrSearcher<'a, 'b> {
        StrSearcher::new(haystack, self)
    }

    /// Checks whether the pattern matches at the front of the haystack.
    #[inline]
    fn is_prefix_of(self, haystack: &'a str) -> bool {
        // Byte-wise prefix comparison is sufficient for UTF-8 strings.
        haystack.as_bytes().starts_with(self.as_bytes())
    }

    /// Removes the pattern from the front of haystack, if it matches.
    #[inline]
    fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
        if self.is_prefix_of(haystack) {
            // SAFETY: prefix was just verified to exist.
            unsafe { Some(haystack.get_unchecked(self.as_bytes().len()..)) }
        } else {
            None
        }
    }

    /// Checks whether the pattern matches at the back of the haystack.
    #[inline]
    fn is_suffix_of(self, haystack: &'a str) -> bool {
        haystack.as_bytes().ends_with(self.as_bytes())
    }

    /// Removes the pattern from the back of haystack, if it matches.
    #[inline]
    fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
        if self.is_suffix_of(haystack) {
            // Cannot underflow: a suffix is never longer than the haystack.
            let i = haystack.len() - self.as_bytes().len();
            // SAFETY: suffix was just verified to exist.
            unsafe { Some(haystack.get_unchecked(..i)) }
        } else {
            None
        }
    }
}
+
+/////////////////////////////////////////////////////////////////////////////
+// Two Way substring searcher
+/////////////////////////////////////////////////////////////////////////////
+
#[derive(Clone, Debug)]
/// Associated type for `<&str as Pattern<'a>>::Searcher`.
pub struct StrSearcher<'a, 'b> {
    haystack: &'a str,
    needle: &'b str,

    searcher: StrSearcherImpl,
}

/// The two search strategies: a dedicated state machine for the empty
/// needle, and the two-way algorithm for everything else.
#[derive(Clone, Debug)]
enum StrSearcherImpl {
    Empty(EmptyNeedle),
    TwoWay(TwoWaySearcher),
}

/// State for searching with an empty needle: it matches the empty string at
/// every char boundary and rejects each char in between (see `next`).
#[derive(Clone, Debug)]
struct EmptyNeedle {
    // forward cursor (byte offset)
    position: usize,
    // backward cursor (byte offset)
    end: usize,
    // toggles so forward steps alternate Match / Reject
    is_match_fw: bool,
    // same, for backward steps
    is_match_bw: bool,
}
+
+impl<'a, 'b> StrSearcher<'a, 'b> {
+ fn new(haystack: &'a str, needle: &'b str) -> StrSearcher<'a, 'b> {
+ if needle.is_empty() {
+ StrSearcher {
+ haystack,
+ needle,
+ searcher: StrSearcherImpl::Empty(EmptyNeedle {
+ position: 0,
+ end: haystack.len(),
+ is_match_fw: true,
+ is_match_bw: true,
+ }),
+ }
+ } else {
+ StrSearcher {
+ haystack,
+ needle,
+ searcher: StrSearcherImpl::TwoWay(TwoWaySearcher::new(
+ needle.as_bytes(),
+ haystack.len(),
+ )),
+ }
+ }
+ }
+}
+
// SAFETY: match indices come from correct two-way matching of valid UTF-8
// needle/haystack; reject indices are explicitly walked to char boundaries
// below, satisfying the `Searcher` contract.
unsafe impl<'a, 'b> Searcher<'a> for StrSearcher<'a, 'b> {
    #[inline]
    fn haystack(&self) -> &'a str {
        self.haystack
    }

    #[inline]
    fn next(&mut self) -> SearchStep {
        match self.searcher {
            StrSearcherImpl::Empty(ref mut searcher) => {
                // empty needle rejects every char and matches every empty string between them
                let is_match = searcher.is_match_fw;
                searcher.is_match_fw = !searcher.is_match_fw;
                let pos = searcher.position;
                match self.haystack[pos..].chars().next() {
                    // Alternate: empty Match at `pos` first, then a Reject
                    // covering the following char on the next call.
                    _ if is_match => SearchStep::Match(pos, pos),
                    None => SearchStep::Done,
                    Some(ch) => {
                        searcher.position += ch.len_utf8();
                        SearchStep::Reject(pos, searcher.position)
                    }
                }
            }
            StrSearcherImpl::TwoWay(ref mut searcher) => {
                // TwoWaySearcher produces valid *Match* indices that split at char boundaries
                // as long as it does correct matching and that haystack and needle are
                // valid UTF-8
                // *Rejects* from the algorithm can fall on any indices, but we will walk them
                // manually to the next character boundary, so that they are utf-8 safe.
                if searcher.position == self.haystack.len() {
                    return SearchStep::Done;
                }
                // `memory == usize::MAX` is the sentinel for the long-period
                // variant, set in `TwoWaySearcher::new`.
                let is_long = searcher.memory == usize::MAX;
                match searcher.next::<RejectAndMatch>(
                    self.haystack.as_bytes(),
                    self.needle.as_bytes(),
                    is_long,
                ) {
                    SearchStep::Reject(a, mut b) => {
                        // skip to next char boundary
                        while !self.haystack.is_char_boundary(b) {
                            b += 1;
                        }
                        searcher.position = cmp::max(b, searcher.position);
                        SearchStep::Reject(a, b)
                    }
                    otherwise => otherwise,
                }
            }
        }
    }

    #[inline]
    fn next_match(&mut self) -> Option<(usize, usize)> {
        match self.searcher {
            StrSearcherImpl::Empty(..) => loop {
                match self.next() {
                    SearchStep::Match(a, b) => return Some((a, b)),
                    SearchStep::Done => return None,
                    SearchStep::Reject(..) => {}
                }
            },
            StrSearcherImpl::TwoWay(ref mut searcher) => {
                let is_long = searcher.memory == usize::MAX;
                // write out `true` and `false` cases to encourage the compiler
                // to specialize the two cases separately.
                if is_long {
                    searcher.next::<MatchOnly>(
                        self.haystack.as_bytes(),
                        self.needle.as_bytes(),
                        true,
                    )
                } else {
                    searcher.next::<MatchOnly>(
                        self.haystack.as_bytes(),
                        self.needle.as_bytes(),
                        false,
                    )
                }
            }
        }
    }
}
+
// SAFETY: mirror of the forward impl -- match indices are char-boundary
// correct by construction, rejects are walked back to a boundary.
unsafe impl<'a, 'b> ReverseSearcher<'a> for StrSearcher<'a, 'b> {
    #[inline]
    fn next_back(&mut self) -> SearchStep {
        match self.searcher {
            StrSearcherImpl::Empty(ref mut searcher) => {
                // Alternate an empty Match at `end` with a Reject covering
                // the preceding char, like the forward case.
                let is_match = searcher.is_match_bw;
                searcher.is_match_bw = !searcher.is_match_bw;
                let end = searcher.end;
                match self.haystack[..end].chars().next_back() {
                    _ if is_match => SearchStep::Match(end, end),
                    None => SearchStep::Done,
                    Some(ch) => {
                        searcher.end -= ch.len_utf8();
                        SearchStep::Reject(searcher.end, end)
                    }
                }
            }
            StrSearcherImpl::TwoWay(ref mut searcher) => {
                if searcher.end == 0 {
                    return SearchStep::Done;
                }
                let is_long = searcher.memory == usize::MAX;
                match searcher.next_back::<RejectAndMatch>(
                    self.haystack.as_bytes(),
                    self.needle.as_bytes(),
                    is_long,
                ) {
                    SearchStep::Reject(mut a, b) => {
                        // skip to next char boundary
                        while !self.haystack.is_char_boundary(a) {
                            a -= 1;
                        }
                        searcher.end = cmp::min(a, searcher.end);
                        SearchStep::Reject(a, b)
                    }
                    otherwise => otherwise,
                }
            }
        }
    }

    #[inline]
    fn next_match_back(&mut self) -> Option<(usize, usize)> {
        match self.searcher {
            StrSearcherImpl::Empty(..) => loop {
                match self.next_back() {
                    SearchStep::Match(a, b) => return Some((a, b)),
                    SearchStep::Done => return None,
                    SearchStep::Reject(..) => {}
                }
            },
            StrSearcherImpl::TwoWay(ref mut searcher) => {
                let is_long = searcher.memory == usize::MAX;
                // write out `true` and `false`, like `next_match`
                if is_long {
                    searcher.next_back::<MatchOnly>(
                        self.haystack.as_bytes(),
                        self.needle.as_bytes(),
                        true,
                    )
                } else {
                    searcher.next_back::<MatchOnly>(
                        self.haystack.as_bytes(),
                        self.needle.as_bytes(),
                        false,
                    )
                }
            }
        }
    }
}
+
/// The internal state of the two-way substring search algorithm.
#[derive(Clone, Debug)]
struct TwoWaySearcher {
    // constants
    /// critical factorization index
    crit_pos: usize,
    /// critical factorization index for reversed needle
    crit_pos_back: usize,
    /// period of the needle (exact in the short-period case, a lower-bound
    /// approximation in the long-period case -- see `new`)
    period: usize,
    /// `byteset` is an extension (not part of the two way algorithm);
    /// it's a 64-bit "fingerprint" where each set bit `j` corresponds
    /// to a (byte & 63) == j present in the needle.
    byteset: u64,

    // variables
    /// forward-search cursor: byte offset into the haystack
    position: usize,
    /// backward-search cursor: byte offset into the haystack
    end: usize,
    /// index into needle before which we have already matched
    memory: usize,
    /// index into needle after which we have already matched
    memory_back: usize,
}
+
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because each non-empty word has at least one factorization, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
 defined. (i.e., i >= 0 and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+ If the period is short, compute another factorization x = u' v' to use
+ for reverse search, chosen instead so that |v'| < period(x).
+
+*/
+impl TwoWaySearcher {
    /// Preprocesses `needle`: computes a critical factorization (forward and
    /// reverse) plus the byte fingerprint, and initializes both search
    /// cursors to cover haystack range `[0, end)`.
    fn new(needle: &[u8], end: usize) -> TwoWaySearcher {
        let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
        let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);

        // The ordering with the larger critical position yields a critical
        // factorization (see `maximal_suffix`).
        let (crit_pos, period) = if crit_pos_false > crit_pos_true {
            (crit_pos_false, period_false)
        } else {
            (crit_pos_true, period_true)
        };

        // A particularly readable explanation of what's going on here can be found
        // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
        // see the code for "Algorithm CP" on p. 323.
        //
        // What's going on is we have some critical factorization (u, v) of the
        // needle, and we want to determine whether u is a suffix of
        // &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
        // "Algorithm CP2", which is optimized for when the period of the needle
        // is large.
        if needle[..crit_pos] == needle[period..period + crit_pos] {
            // short period case -- the period is exact
            // compute a separate critical factorization for the reversed needle
            // x = u' v' where |v'| < period(x).
            //
            // This is sped up by the period being known already.
            // Note that a case like x = "acba" may be factored exactly forwards
            // (crit_pos = 1, period = 3) while being factored with approximate
            // period in reverse (crit_pos = 2, period = 2). We use the given
            // reverse factorization but keep the exact period.
            let crit_pos_back = needle.len()
                - cmp::max(
                    TwoWaySearcher::reverse_maximal_suffix(needle, period, false),
                    TwoWaySearcher::reverse_maximal_suffix(needle, period, true),
                );

            TwoWaySearcher {
                crit_pos,
                crit_pos_back,
                period,
                // Fingerprint only the first period: the rest repeats it.
                byteset: Self::byteset_create(&needle[..period]),

                position: 0,
                end,
                memory: 0,
                memory_back: needle.len(),
            }
        } else {
            // long period case -- we have an approximation to the actual period,
            // and don't use memorization.
            //
            // Approximate the period by lower bound max(|u|, |v|) + 1.
            // The critical factorization is efficient to use for both forward and
            // reverse search.

            TwoWaySearcher {
                crit_pos,
                crit_pos_back: crit_pos,
                period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
                byteset: Self::byteset_create(needle),

                position: 0,
                end,
                memory: usize::MAX, // Dummy value to signify that the period is long
                memory_back: usize::MAX,
            }
        }
    }
+
+ #[inline]
+ fn byteset_create(bytes: &[u8]) -> u64 {
+ bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
+ }
+
+ #[inline]
+ fn byteset_contains(&self, byte: u8) -> bool {
+ (self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
+ }
+
    // One of the main ideas of Two-Way is that we factorize the needle into
    // two halves, (u, v), and begin trying to find v in the haystack by scanning
    // left to right. If v matches, we try to match u by scanning right to left.
    // How far we can jump when we encounter a mismatch is all based on the fact
    // that (u, v) is a critical factorization for the needle.
    //
    // `long_period` selects the approximate-period ("CP2") variant with no
    // match memory; callers pass `self.memory == usize::MAX` for it.
    #[inline]
    fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
    where
        S: TwoWayStrategy,
    {
        // `next()` uses `self.position` as its cursor
        let old_pos = self.position;
        let needle_last = needle.len() - 1;
        'search: loop {
            // Check that we have room to search in
            // position + needle_last can not overflow if we assume slices
            // are bounded by isize's range.
            let tail_byte = match haystack.get(self.position + needle_last) {
                Some(&b) => b,
                None => {
                    // Ran off the end: everything from old_pos on is rejected.
                    self.position = haystack.len();
                    return S::rejecting(old_pos, self.position);
                }
            };

            if S::use_early_reject() && old_pos != self.position {
                return S::rejecting(old_pos, self.position);
            }

            // Quickly skip by large portions unrelated to our substring
            if !self.byteset_contains(tail_byte) {
                self.position += needle.len();
                if !long_period {
                    self.memory = 0;
                }
                continue 'search;
            }

            // See if the right part of the needle matches
            let start =
                if long_period { self.crit_pos } else { cmp::max(self.crit_pos, self.memory) };
            for i in start..needle.len() {
                if needle[i] != haystack[self.position + i] {
                    self.position += i - self.crit_pos + 1;
                    if !long_period {
                        self.memory = 0;
                    }
                    continue 'search;
                }
            }

            // See if the left part of the needle matches
            let start = if long_period { 0 } else { self.memory };
            for i in (start..self.crit_pos).rev() {
                if needle[i] != haystack[self.position + i] {
                    // Left half mismatched: shift by a whole period and
                    // remember the right half already matched.
                    self.position += self.period;
                    if !long_period {
                        self.memory = needle.len() - self.period;
                    }
                    continue 'search;
                }
            }

            // We have found a match!
            let match_pos = self.position;

            // Note: add self.period instead of needle.len() to have overlapping matches
            self.position += needle.len();
            if !long_period {
                self.memory = 0; // set to needle.len() - self.period for overlapping matches
            }

            return S::matching(match_pos, match_pos + needle.len());
        }
    }
+
    // Follows the ideas in `next()`.
    //
    // The definitions are symmetrical, with period(x) = period(reverse(x))
    // and local_period(u, v) = local_period(reverse(v), reverse(u)), so if (u, v)
    // is a critical factorization, so is (reverse(v), reverse(u)).
    //
    // For the reverse case we have computed a critical factorization x = u' v'
    // (field `crit_pos_back`). We need |u| < period(x) for the forward case and
    // thus |v'| < period(x) for the reverse.
    //
    // To search in reverse through the haystack, we search forward through
    // a reversed haystack with a reversed needle, matching first u' and then v'.
    #[inline]
    fn next_back<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
    where
        S: TwoWayStrategy,
    {
        // `next_back()` uses `self.end` as its cursor -- so that `next()` and `next_back()`
        // are independent.
        let old_end = self.end;
        'search: loop {
            // Check that we have room to search in
            // end - needle.len() will wrap around when there is no more room,
            // but due to slice length limits it can never wrap all the way back
            // into the length of haystack.
            let front_byte = match haystack.get(self.end.wrapping_sub(needle.len())) {
                Some(&b) => b,
                None => {
                    // Ran off the front: everything before old_end is rejected.
                    self.end = 0;
                    return S::rejecting(0, old_end);
                }
            };

            if S::use_early_reject() && old_end != self.end {
                return S::rejecting(self.end, old_end);
            }

            // Quickly skip by large portions unrelated to our substring
            if !self.byteset_contains(front_byte) {
                self.end -= needle.len();
                if !long_period {
                    self.memory_back = needle.len();
                }
                continue 'search;
            }

            // See if the left part of the needle matches
            let crit = if long_period {
                self.crit_pos_back
            } else {
                cmp::min(self.crit_pos_back, self.memory_back)
            };
            for i in (0..crit).rev() {
                if needle[i] != haystack[self.end - needle.len() + i] {
                    self.end -= self.crit_pos_back - i;
                    if !long_period {
                        self.memory_back = needle.len();
                    }
                    continue 'search;
                }
            }

            // See if the right part of the needle matches
            let needle_end = if long_period { needle.len() } else { self.memory_back };
            for i in self.crit_pos_back..needle_end {
                if needle[i] != haystack[self.end - needle.len() + i] {
                    // Right half mismatched: shift back by a whole period and
                    // remember the left half already matched.
                    self.end -= self.period;
                    if !long_period {
                        self.memory_back = self.period;
                    }
                    continue 'search;
                }
            }

            // We have found a match!
            let match_pos = self.end - needle.len();
            // Note: sub self.period instead of needle.len() to have overlapping matches
            self.end -= needle.len();
            if !long_period {
                self.memory_back = needle.len();
            }

            return S::matching(match_pos, match_pos + needle.len());
        }
    }
+
+ // Compute the maximal suffix of `arr`.
+ //
+ // The maximal suffix is a possible critical factorization (u, v) of `arr`.
+ //
+ // Returns (`i`, `p`) where `i` is the starting index of v and `p` is the
+ // period of v.
+ //
+ // `order_greater` determines if lexical order is `<` or `>`. Both
+ // orders must be computed -- the ordering with the largest `i` gives
+ // a critical factorization.
+ //
+ // For long period cases, the resulting period is not exact (it is too short).
+ #[inline]
+ fn maximal_suffix(arr: &[u8], order_greater: bool) -> (usize, usize) {
+ let mut left = 0; // Corresponds to i in the paper
+ let mut right = 1; // Corresponds to j in the paper
+ let mut offset = 0; // Corresponds to k in the paper, but starting at 0
+ // to match 0-based indexing.
+ let mut period = 1; // Corresponds to p in the paper
+
+ while let Some(&a) = arr.get(right + offset) {
+ // `left` will be inbounds when `right` is.
+ let b = arr[left + offset];
+ if (a < b && !order_greater) || (a > b && order_greater) {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset + 1;
+ offset = 0;
+ period = right - left;
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset + 1 == period {
+ right += offset + 1;
+ offset = 0;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 0;
+ period = 1;
+ }
+ }
+ (left, period)
+ }
+
+ // Compute the maximal suffix of the reverse of `arr`.
+ //
+ // The maximal suffix is a possible critical factorization (u', v') of `arr`.
+ //
+ // Returns `i` where `i` is the starting index of v', from the back;
+ // returns immediately when a period of `known_period` is reached.
+ //
+ // `order_greater` determines if lexical order is `<` or `>`. Both
+ // orders must be computed -- the ordering with the largest `i` gives
+ // a critical factorization.
+ //
+ // For long period cases, the resulting period is not exact (it is too short).
+ fn reverse_maximal_suffix(arr: &[u8], known_period: usize, order_greater: bool) -> usize {
+ let mut left = 0; // Corresponds to i in the paper
+ let mut right = 1; // Corresponds to j in the paper
+ let mut offset = 0; // Corresponds to k in the paper, but starting at 0
+ // to match 0-based indexing.
+ let mut period = 1; // Corresponds to p in the paper
+ let n = arr.len();
+
+ while right + offset < n {
+ let a = arr[n - (1 + right + offset)];
+ let b = arr[n - (1 + left + offset)];
+ if (a < b && !order_greater) || (a > b && order_greater) {
+ // Suffix is smaller, period is entire prefix so far.
+ right += offset + 1;
+ offset = 0;
+ period = right - left;
+ } else if a == b {
+ // Advance through repetition of the current period.
+ if offset + 1 == period {
+ right += offset + 1;
+ offset = 0;
+ } else {
+ offset += 1;
+ }
+ } else {
+ // Suffix is larger, start over from current location.
+ left = right;
+ right += 1;
+ offset = 0;
+ period = 1;
+ }
+ if period == known_period {
+ break;
+ }
+ }
+ debug_assert!(period <= known_period);
+ left
+ }
+}
+
// TwoWayStrategy allows the algorithm to either skip non-matches as quickly
// as possible, or to work in a mode where it emits Rejects relatively quickly.
trait TwoWayStrategy {
    /// Per-step result type: `Option<(usize, usize)>` or `SearchStep`.
    type Output;
    /// Whether a Reject should be reported as soon as the cursor has moved.
    fn use_early_reject() -> bool;
    /// Builds the output for a rejected byte range `[a, b)`.
    fn rejecting(a: usize, b: usize) -> Self::Output;
    /// Builds the output for a matched byte range `[a, b)`.
    fn matching(a: usize, b: usize) -> Self::Output;
}

/// Skip to match intervals as quickly as possible
enum MatchOnly {}

impl TwoWayStrategy for MatchOnly {
    type Output = Option<(usize, usize)>;

    #[inline]
    fn use_early_reject() -> bool {
        false
    }
    #[inline]
    fn rejecting(_a: usize, _b: usize) -> Self::Output {
        // Rejected ranges are simply not reported in this mode.
        None
    }
    #[inline]
    fn matching(a: usize, b: usize) -> Self::Output {
        Some((a, b))
    }
}

/// Emit Rejects regularly
enum RejectAndMatch {}

impl TwoWayStrategy for RejectAndMatch {
    type Output = SearchStep;

    #[inline]
    fn use_early_reject() -> bool {
        true
    }
    #[inline]
    fn rejecting(a: usize, b: usize) -> Self::Output {
        SearchStep::Reject(a, b)
    }
    #[inline]
    fn matching(a: usize, b: usize) -> Self::Output {
        SearchStep::Match(a, b)
    }
}
--- /dev/null
+//! Trait implementations for `str`.
+
+use crate::cmp::Ordering;
+use crate::ops;
+use crate::ptr;
+use crate::slice::SliceIndex;
+
+use super::ParseBoolError;
+
/// Implements ordering of strings.
///
/// Strings are ordered [lexicographically](Ord#lexicographical-comparison) by their byte values. This orders Unicode code
/// points based on their positions in the code charts. This is not necessarily the same as
/// "alphabetical" order, which varies by language and locale. Sorting strings according to
/// culturally-accepted standards requires locale-specific data that is outside the scope of
/// the `str` type.
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for str {
    #[inline]
    fn cmp(&self, other: &str) -> Ordering {
        // Byte-slice ordering: UTF-8's design makes byte order coincide with
        // code point order.
        self.as_bytes().cmp(other.as_bytes())
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for str {
    #[inline]
    fn eq(&self, other: &str) -> bool {
        self.as_bytes() == other.as_bytes()
    }
    #[inline]
    fn ne(&self, other: &str) -> bool {
        // NOTE(review): `ne` is spelled out rather than using the provided
        // default -- presumably for codegen reasons; confirm before removing.
        !(*self).eq(other)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl Eq for str {}

/// Implements comparison operations on strings.
///
/// Strings are compared [lexicographically](Ord#lexicographical-comparison) by their byte values. This compares Unicode code
/// points based on their positions in the code charts. This is not necessarily the same as
/// "alphabetical" order, which varies by language and locale. Comparing strings according to
/// culturally-accepted standards requires locale-specific data that is outside the scope of
/// the `str` type.
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialOrd for str {
    #[inline]
    fn partial_cmp(&self, other: &str) -> Option<Ordering> {
        // `str` has a total order, so this is always `Some`.
        Some(self.cmp(other))
    }
}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ops::Index<I> for str
where
    I: SliceIndex<str>,
{
    type Output = I::Output;

    #[inline]
    fn index(&self, index: I) -> &I::Output {
        // All `str` indexing is routed through the `SliceIndex` impls.
        index.index(self)
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<I> ops::IndexMut<I> for str
where
    I: SliceIndex<str>,
{
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut I::Output {
        index.index_mut(self)
    }
}

// Out-of-line panic helper: `#[cold]` + `#[inline(never)]` keep callers'
// hot indexing paths small; `#[track_caller]` reports the caller's location.
#[inline(never)]
#[cold]
#[track_caller]
fn str_index_overflow_fail() -> ! {
    panic!("attempted to index str up to maximum usize");
}
+
/// Implements substring slicing with syntax `&self[..]` or `&mut self[..]`.
///
/// Returns a slice of the whole string, i.e., returns `&self` or `&mut
/// self`. Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`. Unlike
/// other indexing operations, this can never panic.
///
/// This operation is *O*(1).
///
/// Prior to 1.20.0, these indexing operations were still supported by
/// direct implementation of `Index` and `IndexMut`.
///
/// Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`.
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
unsafe impl SliceIndex<str> for ops::RangeFull {
    type Output = str;
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        // The full range is always in bounds, so this never returns `None`.
        Some(slice)
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        Some(slice)
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        // Identity: no pointer arithmetic, so nothing can go out of bounds.
        slice
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        slice
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        slice
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        slice
    }
}
+
/// Implements substring slicing with syntax `&self[begin .. end]` or `&mut
/// self[begin .. end]`.
///
/// Returns a slice of the given string from the byte range
/// [`begin`, `end`).
///
/// This operation is *O*(1).
///
/// Prior to 1.20.0, these indexing operations were still supported by
/// direct implementation of `Index` and `IndexMut`.
///
/// # Panics
///
/// Panics if `begin` or `end` does not point to the starting byte offset of
/// a character (as defined by `is_char_boundary`), if `begin > end`, or if
/// `end > len`.
///
/// # Examples
///
/// ```
/// let s = "Löwe 老虎 Léopard";
/// assert_eq!(&s[0 .. 1], "L");
///
/// assert_eq!(&s[1 .. 9], "öwe 老");
///
/// // these will panic:
/// // byte 2 lies within `ö`:
/// // &s[2 ..3];
///
/// // byte 8 lies within `老`
/// // &s[1 .. 8];
///
/// // byte 100 is outside the string
/// // &s[3 .. 100];
/// ```
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
unsafe impl SliceIndex<str> for ops::Range<usize> {
    type Output = str;
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        // `is_char_boundary` returns `false` for out-of-bounds indices, so
        // these three checks together establish
        // `start <= end <= slice.len()` with both ends on char boundaries.
        if self.start <= self.end
            && slice.is_char_boundary(self.start)
            && slice.is_char_boundary(self.end)
        {
            // SAFETY: just checked that `start` and `end` are on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            // We also checked char boundaries, so this is valid UTF-8.
            Some(unsafe { &*self.get_unchecked(slice) })
        } else {
            None
        }
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        if self.start <= self.end
            && slice.is_char_boundary(self.start)
            && slice.is_char_boundary(self.end)
        {
            // SAFETY: just checked that `start` and `end` are on a char boundary.
            // We know the pointer is unique because we got it from `slice`.
            Some(unsafe { &mut *self.get_unchecked_mut(slice) })
        } else {
            None
        }
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        let slice = slice as *const [u8];
        // SAFETY: the caller guarantees that `self` is in bounds of `slice`
        // which satisfies all the conditions for `add`.
        let ptr = unsafe { slice.as_ptr().add(self.start) };
        // The same contract guarantees `start <= end`, so this cannot underflow.
        let len = self.end - self.start;
        ptr::slice_from_raw_parts(ptr, len) as *const str
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        let slice = slice as *mut [u8];
        // SAFETY: see comments for `get_unchecked`.
        let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
        let len = self.end - self.start;
        ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        // Copy the bounds out first: `get` consumes `self`.
        let (start, end) = (self.start, self.end);
        match self.get(slice) {
            Some(s) => s,
            None => super::slice_error_fail(slice, start, end),
        }
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        // is_char_boundary checks that the index is in [0, .len()]
        // cannot reuse `get` as above, because of NLL trouble
        if self.start <= self.end
            && slice.is_char_boundary(self.start)
            && slice.is_char_boundary(self.end)
        {
            // SAFETY: just checked that `start` and `end` are on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            unsafe { &mut *self.get_unchecked_mut(slice) }
        } else {
            super::slice_error_fail(slice, self.start, self.end)
        }
    }
}
+
/// Implements substring slicing with syntax `&self[.. end]` or `&mut
/// self[.. end]`.
///
/// Returns a slice of the given string from the byte range [`0`, `end`).
/// Equivalent to `&self[0 .. end]` or `&mut self[0 .. end]`.
///
/// This operation is *O*(1).
///
/// Prior to 1.20.0, these indexing operations were still supported by
/// direct implementation of `Index` and `IndexMut`.
///
/// # Panics
///
/// Panics if `end` does not point to the starting byte offset of a
/// character (as defined by `is_char_boundary`), or if `end > len`.
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
unsafe impl SliceIndex<str> for ops::RangeTo<usize> {
    type Output = str;
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        // `is_char_boundary` also rejects `end > len`, so no separate
        // bounds check is required.
        if slice.is_char_boundary(self.end) {
            // SAFETY: just checked that `end` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            Some(unsafe { &*self.get_unchecked(slice) })
        } else {
            None
        }
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        if slice.is_char_boundary(self.end) {
            // SAFETY: just checked that `end` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            Some(unsafe { &mut *self.get_unchecked_mut(slice) })
        } else {
            None
        }
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        // Only the length changes; the data pointer is reused unchanged,
        // so no pointer arithmetic (and no inner `unsafe` block) is needed.
        let slice = slice as *const [u8];
        let ptr = slice.as_ptr();
        ptr::slice_from_raw_parts(ptr, self.end) as *const str
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        let slice = slice as *mut [u8];
        let ptr = slice.as_mut_ptr();
        ptr::slice_from_raw_parts_mut(ptr, self.end) as *mut str
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        // Copy the bound out first: `get` consumes `self`.
        let end = self.end;
        match self.get(slice) {
            Some(s) => s,
            None => super::slice_error_fail(slice, 0, end),
        }
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        // Checked inline rather than via `get_mut` to keep the borrow
        // checker happy across the early return (same pattern as the
        // `Range` impl above).
        if slice.is_char_boundary(self.end) {
            // SAFETY: just checked that `end` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            unsafe { &mut *self.get_unchecked_mut(slice) }
        } else {
            super::slice_error_fail(slice, 0, self.end)
        }
    }
}
+
/// Implements substring slicing with syntax `&self[begin ..]` or `&mut
/// self[begin ..]`.
///
/// Returns a slice of the given string from the byte range [`begin`,
/// `len`). Equivalent to `&self[begin .. len]` or `&mut self[begin ..
/// len]`.
///
/// This operation is *O*(1).
///
/// Prior to 1.20.0, these indexing operations were still supported by
/// direct implementation of `Index` and `IndexMut`.
///
/// # Panics
///
/// Panics if `begin` does not point to the starting byte offset of
/// a character (as defined by `is_char_boundary`), or if `begin > len`.
#[stable(feature = "str_checked_slicing", since = "1.20.0")]
unsafe impl SliceIndex<str> for ops::RangeFrom<usize> {
    type Output = str;
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        // `is_char_boundary` also rejects `start > len`, so no separate
        // bounds check is required.
        if slice.is_char_boundary(self.start) {
            // SAFETY: just checked that `start` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            Some(unsafe { &*self.get_unchecked(slice) })
        } else {
            None
        }
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        if slice.is_char_boundary(self.start) {
            // SAFETY: just checked that `start` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            Some(unsafe { &mut *self.get_unchecked_mut(slice) })
        } else {
            None
        }
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        let slice = slice as *const [u8];
        // SAFETY: the caller guarantees that `self` is in bounds of `slice`
        // which satisfies all the conditions for `add`.
        let ptr = unsafe { slice.as_ptr().add(self.start) };
        // In-bounds guarantee means `start <= len`, so this cannot underflow.
        let len = slice.len() - self.start;
        ptr::slice_from_raw_parts(ptr, len) as *const str
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        let slice = slice as *mut [u8];
        // SAFETY: identical to `get_unchecked`.
        let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
        let len = slice.len() - self.start;
        ptr::slice_from_raw_parts_mut(ptr, len) as *mut str
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        // Copy the bounds out first: `get` consumes `self`.
        let (start, end) = (self.start, slice.len());
        match self.get(slice) {
            Some(s) => s,
            None => super::slice_error_fail(slice, start, end),
        }
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        // Checked inline rather than via `get_mut` to keep the borrow
        // checker happy across the early return.
        if slice.is_char_boundary(self.start) {
            // SAFETY: just checked that `start` is on a char boundary,
            // and we are passing in a safe reference, so the return value will also be one.
            unsafe { &mut *self.get_unchecked_mut(slice) }
        } else {
            super::slice_error_fail(slice, self.start, slice.len())
        }
    }
}
+
/// Implements substring slicing with syntax `&self[begin ..= end]` or `&mut
/// self[begin ..= end]`.
///
/// Returns a slice of the given string from the byte range
/// [`begin`, `end`]. Equivalent to `&self [begin .. end + 1]` or `&mut
/// self[begin .. end + 1]`, except if `end` has the maximum value for
/// `usize`.
///
/// This operation is *O*(1).
///
/// # Panics
///
/// Panics if `begin` does not point to the starting byte offset of
/// a character (as defined by `is_char_boundary`), if `end` does not point
/// to the ending byte offset of a character (`end + 1` is either a starting
/// byte offset or equal to `len`), if `begin > end`, or if `end >= len`.
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl SliceIndex<str> for ops::RangeInclusive<usize> {
    type Output = str;
    // Every method delegates to the half-open `Range` impl through
    // `into_slice_range` (presumably `a..=b` -> `a .. b + 1`); the
    // `usize::MAX` checks below make that `+ 1` conversion overflow-free.
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
        unsafe { self.into_slice_range().get_unchecked(slice) }
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
        unsafe { self.into_slice_range().get_unchecked_mut(slice) }
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        if *self.end() == usize::MAX {
            str_index_overflow_fail();
        }
        self.into_slice_range().index(slice)
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        if *self.end() == usize::MAX {
            str_index_overflow_fail();
        }
        self.into_slice_range().index_mut(slice)
    }
}
+
/// Implements substring slicing with syntax `&self[..= end]` or `&mut
/// self[..= end]`.
///
/// Returns a slice of the given string from the byte range [0, `end`].
/// Equivalent to `&self [0 .. end + 1]`, except if `end` has the maximum
/// value for `usize`.
///
/// This operation is *O*(1).
///
/// # Panics
///
/// Panics if `end` does not point to the ending byte offset of a character
/// (`end + 1` is either a starting byte offset as defined by
/// `is_char_boundary`, or equal to `len`), or if `end >= len`.
#[stable(feature = "inclusive_range", since = "1.26.0")]
unsafe impl SliceIndex<str> for ops::RangeToInclusive<usize> {
    type Output = str;
    // Every method delegates to the `RangeTo` impl via `.. end + 1`; the
    // `usize::MAX` checks below make the `+ 1` overflow-free.
    #[inline]
    fn get(self, slice: &str) -> Option<&Self::Output> {
        if self.end == usize::MAX { None } else { (..self.end + 1).get(slice) }
    }
    #[inline]
    fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
        if self.end == usize::MAX { None } else { (..self.end + 1).get_mut(slice) }
    }
    #[inline]
    unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
        unsafe { (..self.end + 1).get_unchecked(slice) }
    }
    #[inline]
    unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
        unsafe { (..self.end + 1).get_unchecked_mut(slice) }
    }
    #[inline]
    fn index(self, slice: &str) -> &Self::Output {
        if self.end == usize::MAX {
            str_index_overflow_fail();
        }
        (..self.end + 1).index(slice)
    }
    #[inline]
    fn index_mut(self, slice: &mut str) -> &mut Self::Output {
        if self.end == usize::MAX {
            str_index_overflow_fail();
        }
        (..self.end + 1).index_mut(slice)
    }
}
+
/// Parse a value from a string
///
/// `FromStr`'s [`from_str`] method is often used implicitly, through
/// [`str`]'s [`parse`] method. See [`parse`]'s documentation for examples.
///
/// [`from_str`]: FromStr::from_str
/// [`parse`]: str::parse
///
/// `FromStr` does not have a lifetime parameter, and so you can only parse types
/// that do not contain a lifetime parameter themselves. In other words, you can
/// parse an `i32` with `FromStr`, but not a `&i32`. You can parse a struct that
/// contains an `i32`, but not one that contains an `&i32`.
///
/// # Examples
///
/// Basic implementation of `FromStr` on an example `Point` type:
///
/// ```
/// use std::str::FromStr;
/// use std::num::ParseIntError;
///
/// #[derive(Debug, PartialEq)]
/// struct Point {
///     x: i32,
///     y: i32
/// }
///
/// impl FromStr for Point {
///     type Err = ParseIntError;
///
///     fn from_str(s: &str) -> Result<Self, Self::Err> {
///         let coords: Vec<&str> = s.trim_matches(|p| p == '(' || p == ')' )
///             .split(',')
///             .collect();
///
///         let x_fromstr = coords[0].parse::<i32>()?;
///         let y_fromstr = coords[1].parse::<i32>()?;
///
///         Ok(Point { x: x_fromstr, y: y_fromstr })
///     }
/// }
///
/// let p = Point::from_str("(1,2)");
/// assert_eq!(p.unwrap(), Point{ x: 1, y: 2} )
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub trait FromStr: Sized {
    /// The associated error which can be returned from parsing.
    #[stable(feature = "rust1", since = "1.0.0")]
    type Err;

    /// Parses a string `s` to return a value of this type.
    ///
    /// If parsing succeeds, return the value inside [`Ok`]; otherwise, when
    /// the string is ill-formatted, return an implementation-specific error
    /// inside [`Err`]. The error type is chosen by the implementation of
    /// the trait (the `Err` associated type above).
    ///
    /// # Examples
    ///
    /// Basic usage with [`i32`][ithirtytwo], a type that implements `FromStr`:
    ///
    /// [ithirtytwo]: ../../std/primitive.i32.html
    ///
    /// ```
    /// use std::str::FromStr;
    ///
    /// let s = "5";
    /// let x = i32::from_str(s).unwrap();
    ///
    /// assert_eq!(5, x);
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    fn from_str(s: &str) -> Result<Self, Self::Err>;
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for bool {
+ type Err = ParseBoolError;
+
+ /// Parse a `bool` from a string.
+ ///
+ /// Yields a `Result<bool, ParseBoolError>`, because `s` may or may not
+ /// actually be parseable.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::str::FromStr;
+ ///
+ /// assert_eq!(FromStr::from_str("true"), Ok(true));
+ /// assert_eq!(FromStr::from_str("false"), Ok(false));
+ /// assert!(<bool as FromStr>::from_str("not even a boolean").is_err());
+ /// ```
+ ///
+ /// Note, in many cases, the `.parse()` method on `str` is more proper.
+ ///
+ /// ```
+ /// assert_eq!("true".parse(), Ok(true));
+ /// assert_eq!("false".parse(), Ok(false));
+ /// assert!("not even a boolean".parse::<bool>().is_err());
+ /// ```
+ #[inline]
+ fn from_str(s: &str) -> Result<bool, ParseBoolError> {
+ match s {
+ "true" => Ok(true),
+ "false" => Ok(false),
+ _ => Err(ParseBoolError { _priv: () }),
+ }
+ }
+}
--- /dev/null
+//! Operations related to UTF-8 validation.
+
+use crate::mem;
+
+use super::Utf8Error;
+
/// Extracts the payload bits of a UTF-8 leading byte.
///
/// The leading byte of a `width`-byte sequence keeps `7 - width` data
/// bits: 5 bits for width 2, 4 bits for width 3, and 3 bits for width 4;
/// the remaining high bits are the length tag and are masked off.
#[inline]
fn utf8_first_byte(byte: u8, width: u32) -> u32 {
    let data_mask = 0x7F >> width;
    u32::from(byte & data_mask)
}
+
+/// Returns the value of `ch` updated with continuation byte `byte`.
+#[inline]
+fn utf8_acc_cont_byte(ch: u32, byte: u8) -> u32 {
+ (ch << 6) | (byte & CONT_MASK) as u32
+}
+
/// Checks whether the byte is a UTF-8 continuation byte (i.e., starts with the
/// bits `10`).
#[inline]
pub(super) fn utf8_is_cont_byte(byte: u8) -> bool {
    // Clear the six payload bits and compare the remaining tag bits to `10`.
    (byte & !CONT_MASK) == TAG_CONT_U8
}
+
/// Returns the pointed-to byte, or `0` if the iterator was exhausted.
///
/// The UTF-8 decoders below use `0` as a harmless end-of-input filler:
/// `0` is never a valid continuation byte, so it cannot extend a
/// multi-byte sequence.
#[inline]
fn unwrap_or_0(opt: Option<&u8>) -> u8 {
    // `copied().unwrap_or(0)` is the idiomatic form of the manual
    // `match opt { Some(&b) => b, None => 0 }`.
    opt.copied().unwrap_or(0)
}
+
/// Reads the next code point out of a byte iterator (assuming a
/// UTF-8-like encoding).
///
/// Returns `None` when the iterator is exhausted. A truncated trailing
/// sequence is padded with 0 bytes via `unwrap_or_0`, so invalid input
/// yields an unspecified (but memory-safe) value rather than an error.
#[unstable(feature = "str_internals", issue = "none")]
#[inline]
pub fn next_code_point<'a, I: Iterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
    // Decode UTF-8
    let x = *bytes.next()?;
    if x < 128 {
        // ASCII fast path: a single-byte sequence is the code point itself.
        return Some(x as u32);
    }

    // Multibyte case follows
    // Decode from a byte combination out of: [[[x y] z] w]
    // NOTE: Performance is sensitive to the exact formulation here
    let init = utf8_first_byte(x, 2);
    let y = unwrap_or_0(bytes.next());
    let mut ch = utf8_acc_cont_byte(init, y);
    if x >= 0xE0 {
        // [[x y z] w] case
        // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
        let z = unwrap_or_0(bytes.next());
        let y_z = utf8_acc_cont_byte((y & CONT_MASK) as u32, z);
        ch = init << 12 | y_z;
        if x >= 0xF0 {
            // [x y z w] case
            // use only the lower 3 bits of `init`
            let w = unwrap_or_0(bytes.next());
            ch = (init & 7) << 18 | utf8_acc_cont_byte(y_z, w);
        }
    }

    Some(ch)
}
+
/// Reads the last code point out of a byte iterator (assuming a
/// UTF-8-like encoding).
///
/// The mirror image of `next_code_point`: walks backwards over
/// continuation bytes until the leading byte is found, rebuilding the
/// accumulator each time a longer sequence is discovered.
#[inline]
pub(super) fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option<u32>
where
    I: DoubleEndedIterator<Item = &'a u8>,
{
    // Decode UTF-8
    let w = match *bytes.next_back()? {
        // ASCII fast path: the last byte is a whole code point.
        next_byte if next_byte < 128 => return Some(next_byte as u32),
        back_byte => back_byte,
    };

    // Multibyte case follows
    // Decode from a byte combination out of: [x [y [z w]]]
    let mut ch;
    let z = unwrap_or_0(bytes.next_back());
    ch = utf8_first_byte(z, 2);
    if utf8_is_cont_byte(z) {
        let y = unwrap_or_0(bytes.next_back());
        ch = utf8_first_byte(y, 3);
        if utf8_is_cont_byte(y) {
            let x = unwrap_or_0(bytes.next_back());
            ch = utf8_first_byte(x, 4);
            ch = utf8_acc_cont_byte(ch, y);
        }
        ch = utf8_acc_cont_byte(ch, z);
    }
    ch = utf8_acc_cont_byte(ch, w);

    Some(ch)
}
+
// use truncation to fit u64 into usize: on 32-bit targets the cast keeps
// the low four lanes, which is still one 0x80 mask byte per byte of `usize`
const NONASCII_MASK: usize = 0x80808080_80808080u64 as usize;

/// Returns `true` if any byte in the word `x` is nonascii (>= 128).
///
/// Each byte of `NONASCII_MASK` has only its high bit set, so the test
/// reduces to "does any byte of `x` have its high bit set".
#[inline]
fn contains_nonascii(x: usize) -> bool {
    x & NONASCII_MASK != 0
}
+
/// Walks through `v` checking that it's a valid UTF-8 sequence,
/// returning `Ok(())` in that case, or, if it is invalid, `Err(err)`.
///
/// On error, `valid_up_to` is the start of the first invalid sequence;
/// `error_len` is `None` when the input ended in the middle of a sequence
/// (more data could complete it) and `Some(n)` when the next `n` bytes are
/// definitely malformed.
#[inline(always)]
pub(super) fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
    let mut index = 0;
    let len = v.len();

    let usize_bytes = mem::size_of::<usize>();
    let ascii_block_size = 2 * usize_bytes;
    // Largest `index` from which a full two-word block can still be read.
    let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
    // Offset of the first usize-aligned byte (`usize::MAX` if unreachable).
    let align = v.as_ptr().align_offset(usize_bytes);

    while index < len {
        let old_offset = index;
        // Reports an error for the sequence starting at `old_offset`.
        macro_rules! err {
            ($error_len: expr) => {
                return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len });
            };
        }

        // Fetches the next byte of the current sequence, erroring with
        // `error_len: None` (incomplete sequence) if the input ends.
        macro_rules! next {
            () => {{
                index += 1;
                // we needed data, but there was none: error!
                if index >= len {
                    err!(None)
                }
                v[index]
            }};
        }

        let first = v[index];
        if first >= 128 {
            let w = UTF8_CHAR_WIDTH[first as usize];
            // 2-byte encoding is for codepoints \u{0080} to \u{07ff}
            // first C2 80 last DF BF
            // 3-byte encoding is for codepoints \u{0800} to \u{ffff}
            // first E0 A0 80 last EF BF BF
            // excluding surrogates codepoints \u{d800} to \u{dfff}
            // ED A0 80 to ED BF BF
            // 4-byte encoding is for codepoints \u{1000}0 to \u{10ff}ff
            // first F0 90 80 80 last F4 8F BF BF
            //
            // Use the UTF-8 syntax from the RFC
            //
            // https://tools.ietf.org/html/rfc3629
            // UTF8-1 = %x00-7F
            // UTF8-2 = %xC2-DF UTF8-tail
            // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
            // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
            // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
            // %xF4 %x80-8F 2( UTF8-tail )
            match w {
                2 => {
                    if next!() & !CONT_MASK != TAG_CONT_U8 {
                        err!(Some(1))
                    }
                }
                3 => {
                    // The second byte's valid range depends on the first
                    // byte (overlong forms and surrogates are rejected here).
                    match (first, next!()) {
                        (0xE0, 0xA0..=0xBF)
                        | (0xE1..=0xEC, 0x80..=0xBF)
                        | (0xED, 0x80..=0x9F)
                        | (0xEE..=0xEF, 0x80..=0xBF) => {}
                        _ => err!(Some(1)),
                    }
                    if next!() & !CONT_MASK != TAG_CONT_U8 {
                        err!(Some(2))
                    }
                }
                4 => {
                    // Rejects overlong forms (0xF0) and values above
                    // U+10FFFF (0xF4) via the second-byte ranges.
                    match (first, next!()) {
                        (0xF0, 0x90..=0xBF) | (0xF1..=0xF3, 0x80..=0xBF) | (0xF4, 0x80..=0x8F) => {}
                        _ => err!(Some(1)),
                    }
                    if next!() & !CONT_MASK != TAG_CONT_U8 {
                        err!(Some(2))
                    }
                    if next!() & !CONT_MASK != TAG_CONT_U8 {
                        err!(Some(3))
                    }
                }
                // w == 0: byte can never start a sequence (continuation
                // byte, 0xC0/0xC1, or 0xF5..=0xFF).
                _ => err!(Some(1)),
            }
            index += 1;
        } else {
            // Ascii case, try to skip forward quickly.
            // When the pointer is aligned, read 2 words of data per iteration
            // until we find a word containing a non-ascii byte.
            if align != usize::MAX && align.wrapping_sub(index) % usize_bytes == 0 {
                let ptr = v.as_ptr();
                while index < blocks_end {
                    // SAFETY: since `align - index` and `ascii_block_size` are
                    // multiples of `usize_bytes`, `block = ptr.add(index)` is
                    // always aligned with a `usize` so it's safe to dereference
                    // both `block` and `block.offset(1)`.
                    unsafe {
                        let block = ptr.add(index) as *const usize;
                        // break if there is a nonascii byte
                        let zu = contains_nonascii(*block);
                        let zv = contains_nonascii(*block.offset(1));
                        if zu | zv {
                            break;
                        }
                    }
                    index += ascii_block_size;
                }
                // step from the point where the wordwise loop stopped
                while index < len && v[index] < 128 {
                    index += 1;
                }
            } else {
                index += 1;
            }
        }
    }

    Ok(())
}
+
// https://tools.ietf.org/html/rfc3629
// Sequence length implied by each possible leading byte. 0 marks bytes that
// can never start a sequence: continuation bytes 0x80..=0xBF and the invalid
// leading bytes 0xC0, 0xC1, and 0xF5..=0xFF.
static UTF8_CHAR_WIDTH: [u8; 256] = [
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, // 0x1F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, // 0x3F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, // 0x5F
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, // 0x7F
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, // 0x9F
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, // 0xBF
    0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, // 0xDF
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF
    4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF
];
+
+/// Given a first byte, determines how many bytes are in this UTF-8 character.
+#[unstable(feature = "str_internals", issue = "none")]
+#[inline]
+pub fn utf8_char_width(b: u8) -> usize {
+ UTF8_CHAR_WIDTH[b as usize] as usize
+}
+
/// Mask of the value bits of a continuation byte.
/// A continuation byte has the form `0b10xx_xxxx`; this selects the `x` bits.
const CONT_MASK: u8 = 0b0011_1111;
/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte.
const TAG_CONT_U8: u8 = 0b1000_0000;
+
+// truncate `&str` to length at most equal to `max`
+// return `true` if it were truncated, and the new str.
+pub(super) fn truncate_to_char_boundary(s: &str, mut max: usize) -> (bool, &str) {
+ if max >= s.len() {
+ (false, s)
+ } else {
+ while !s.is_char_boundary(max) {
+ max -= 1;
+ }
+ (true, &s[..max])
+ }
+}
--- /dev/null
+//! Atomic types
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent
+//! types.
+//!
+//! This module defines atomic versions of a select number of primitive
+//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
+//! [`AtomicI8`], [`AtomicU16`], etc.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an [`Ordering`] which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2].
+//!
+//! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order
+//! [2]: ../../../nomicon/atomics.html
+//!
+//! Atomic variables are safe to share between threads (they implement [`Sync`])
+//! but they do not themselves provide the mechanism for sharing and follow the
+//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
+//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
+//! atomically-reference-counted shared pointer).
+//!
+//! [arc]: ../../../std/sync/struct.Arc.html
+//!
+//! Atomic types may be stored in static variables, initialized using
+//! the constant initializers like [`AtomicBool::new`]. Atomic statics
+//! are often used for lazy global initialization.
+//!
+//! # Portability
+//!
+//! All atomic types in this module are guaranteed to be [lock-free] if they're
+//! available. This means they don't internally acquire a global mutex. Atomic
+//! types and operations are not guaranteed to be wait-free. This means that
+//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
+//!
+//! Atomic operations may be implemented at the instruction layer with
+//! larger-size atomics. For example some platforms use 4-byte atomic
+//! instructions to implement `AtomicI8`. Note that this emulation should not
+//! have an impact on correctness of code, it's just something to be aware of.
+//!
+//! The atomic types in this module may not be available on all platforms. The
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
+//!
+//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
+//! `AtomicI64` types.
+//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics
+//! at all.
+//! * ARM targets with `thumbv6m` do not have atomic operations at all.
+//!
+//! Note that future platforms may be added that also do not have support for
+//! some atomic operations. Maximally portable code will want to be careful
+//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
+//! generally the most portable, but even then they're not available everywhere.
+//! For reference, the `std` library requires pointer-sized atomics, although
+//! `core` does not.
+//!
+//! Currently you'll need to use `#[cfg(target_arch)]` primarily to
+//! conditionally compile in code with atomics. There is an unstable
+//! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future.
+//!
+//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
+//!
+//! # Examples
+//!
+//! A simple spinlock:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::atomic::{AtomicUsize, Ordering};
+//! use std::thread;
+//!
+//! fn main() {
+//! let spinlock = Arc::new(AtomicUsize::new(1));
+//!
+//! let spinlock_clone = Arc::clone(&spinlock);
+//! let thread = thread::spawn(move|| {
+//! spinlock_clone.store(0, Ordering::SeqCst);
+//! });
+//!
+//! // Wait for the other thread to release the lock
+//! while spinlock.load(Ordering::SeqCst) != 0 {}
+//!
+//! if let Err(panic) = thread.join() {
+//! println!("Thread had an error: {:?}", panic);
+//! }
+//! }
+//! ```
+//!
+//! Keep a global count of live threads:
+//!
+//! ```
+//! use std::sync::atomic::{AtomicUsize, Ordering};
+//!
+//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
+//!
+//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
+//! println!("live threads: {}", old_thread_count + 1);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
+#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
+
+use self::Ordering::*;
+
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::intrinsics;
+
+use crate::hint::spin_loop;
+
/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is expected to be deprecated in favor of
/// [`hint::spin_loop`].
///
/// **Note**: On platforms that do not support receiving spin-loop hints this function does not
/// do anything at all.
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
pub fn spin_loop_hint() {
    // Thin forwarding wrapper kept for backwards compatibility with the
    // pre-`hint::spin_loop` name.
    spin_loop()
}
+
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(C, align(1))]
pub struct AtomicBool {
    // Stored as a `u8` so byte-sized atomic loads/stores apply (see the
    // `cfg` above); `UnsafeCell` provides the interior mutability that
    // atomic operations require, and `repr(C, align(1))` pins the layout
    // to a single byte so it matches `bool` as documented.
    v: UnsafeCell<u8>,
}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Default for AtomicBool {
+ /// Creates an `AtomicBool` initialized to `false`.
+ #[inline]
+ fn default() -> Self {
+ Self::new(false)
+ }
+}
+
// Send is implicitly implemented for AtomicBool.
// SAFETY: sharing `&AtomicBool` across threads is sound because mutation
// of the inner cell goes through atomic operations — NOTE(review): this
// relies on no non-atomic access existing in the methods; confirm there.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl Sync for AtomicBool {}
+
/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
// Pin size/alignment to the target's pointer width so the layout matches
// `*mut T` as documented above.
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
pub struct AtomicPtr<T> {
    // `UnsafeCell` provides the interior mutability atomic operations need;
    // only the pointer value itself is stored, never a `T`.
    p: UnsafeCell<*mut T>,
}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for AtomicPtr<T> {
+ /// Creates a null `AtomicPtr<T>`.
+ fn default() -> AtomicPtr<T> {
+ AtomicPtr::new(crate::ptr::null_mut())
+ }
+}
+
// SAFETY: sending an `AtomicPtr<T>` moves only the raw pointer value, and
// shared access is mediated by atomic operations; neither impl requires
// `T: Send`/`T: Sync` because no `T` is ever stored or dereferenced here —
// NOTE(review): confirm the methods below never dereference the pointer.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T> Sync for AtomicPtr<T> {}
+
/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
// `#[non_exhaustive]` leaves room to add orderings without a breaking change.
#[non_exhaustive]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}
+
+/// An [`AtomicBool`] initialized to `false`.
+///
+/// Kept only for backwards compatibility: `AtomicBool::new` has been
+/// callable in constant contexts since 1.32.0 and should be used instead.
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(
+ since = "1.34.0",
+ reason = "the `new` function is now preferred",
+ suggestion = "AtomicBool::new(false)"
+)]
+pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
+
+#[cfg(target_has_atomic_load_store = "8")]
+impl AtomicBool {
+ /// Creates a new `AtomicBool` holding the given initial value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ ///
+ /// let flag_on = AtomicBool::new(true);
+ /// let flag_off = AtomicBool::new(false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
+ pub const fn new(v: bool) -> AtomicBool {
+ // The value is kept as a `u8` (0 or 1) so the integer intrinsics apply.
+ Self { v: UnsafeCell::new(v as u8) }
+ }
+
+ /// Returns a mutable reference to the underlying [`bool`].
+ ///
+ /// Taking `&mut self` statically proves no other thread can be touching
+ /// this atomic concurrently, so plain non-atomic access is sound.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bool = AtomicBool::new(true);
+ /// assert_eq!(*some_bool.get_mut(), true);
+ /// *some_bool.get_mut() = false;
+ /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ pub fn get_mut(&mut self) -> &mut bool {
+ let ptr = self.v.get() as *mut bool;
+ // SAFETY: `&mut self` guarantees exclusive access, and the stored byte
+ // is always 0 or 1, i.e. a valid `bool`.
+ unsafe { &mut *ptr }
+ }
+
+ /// Get atomic access to a `&mut bool`.
+ ///
+ /// The returned borrow reuses the lifetime of the `&mut bool`, so for
+ /// that lifetime all access to the value goes through this atomic view.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bool = true;
+ /// let a = AtomicBool::from_mut(&mut some_bool);
+ /// a.store(false, Ordering::Relaxed);
+ /// assert_eq!(some_bool, false);
+ /// ```
+ #[inline]
+ // Only compiled in where `AtomicBool` and `u8` share size and alignment.
+ #[cfg(target_has_atomic_equal_alignment = "8")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut bool) -> &Self {
+ // SAFETY: the mutable reference guarantees unique ownership, and
+ // alignment of both `bool` and `Self` is 1.
+ unsafe { &*(v as *mut bool as *mut Self) }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicBool;
+ ///
+ /// let some_bool = AtomicBool::new(true);
+ /// assert_eq!(some_bool.into_inner(), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> bool {
+ // The stored byte is always 0 or 1; a nonzero byte maps back to `true`.
+ self.v.into_inner() != 0
+ }
+
+ /// Atomically reads the current value.
+ ///
+ /// The [`Ordering`] argument selects the memory ordering of this load.
+ /// Valid choices are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics when `order` is [`Release`] or [`AcqRel`], as those only apply
+ /// to operations that store.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ ///
+ /// assert_eq!(flag.load(Ordering::Relaxed), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> bool {
+ // SAFETY: the raw pointer comes from `UnsafeCell::get` on a live
+ // reference, and the atomic intrinsic rules out data races.
+ let raw = unsafe { atomic_load(self.v.get(), order) };
+ raw != 0
+ }
+
+ /// Atomically writes `val` into the bool.
+ ///
+ /// The [`Ordering`] argument selects the memory ordering of this store.
+ /// Valid choices are [`SeqCst`], [`Release`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics when `order` is [`Acquire`] or [`AcqRel`], as those only apply
+ /// to operations that load.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ ///
+ /// flag.store(false, Ordering::Relaxed);
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: bool, order: Ordering) {
+ let raw = val as u8;
+ // SAFETY: the pointer originates from a valid reference via
+ // `UnsafeCell::get`, and the atomic intrinsic rules out data races.
+ unsafe { atomic_store(self.v.get(), raw, order) }
+ }
+
+ /// Atomically replaces the stored value with `val`, returning what was
+ /// there before.
+ ///
+ /// `swap` accepts every [`Ordering`] mode. Bear in mind that [`Acquire`]
+ /// leaves the store half of this read-modify-write [`Relaxed`], while
+ /// [`Release`] leaves the load half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ ///
+ /// assert_eq!(flag.swap(false, Ordering::Relaxed), true);
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn swap(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: the atomic intrinsic rules out data races on `self.v`.
+ let old = unsafe { atomic_swap(self.v.get(), val as u8, order) };
+ old != 0
+ }
+
+ /// Stores `new` into the [`bool`] when the current value equals `current`.
+ ///
+ /// The previous value is returned unconditionally; the store took place
+ /// exactly when that return value equals `current`.
+ ///
+ /// `order` describes the memory ordering of the read-modify-write. Even
+ /// with [`AcqRel`], a failed comparison performs only an `Acquire` load
+ /// and carries no `Release` semantics. [`Acquire`] leaves the store part
+ /// (if any) [`Relaxed`], and [`Release`] leaves the load part [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ ///
+ /// assert_eq!(flag.compare_and_swap(true, false, Ordering::Relaxed), true);
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(flag.compare_and_swap(true, true, Ordering::Relaxed), false);
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ // Success and failure both carry the previous value, so collapse the
+ // `Result` from `compare_exchange` either way.
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ .unwrap_or_else(|prev| prev)
+ }
+
+ /// Stores `new` into the [`bool`] when the current value equals `current`,
+ /// reporting the outcome as a `Result`.
+ ///
+ /// `Ok(prev)` means the exchange happened and `prev` is guaranteed to
+ /// equal `current`; `Err(prev)` means it did not, and `prev` is the value
+ /// that was actually observed.
+ ///
+ /// Two [`Ordering`] arguments control the memory ordering: `success`
+ /// applies to the read-modify-write when the exchange happens, `failure`
+ /// to the load when it does not. Using [`Acquire`] as success ordering
+ /// makes the store part [`Relaxed`], and [`Release`] makes the successful
+ /// load [`Relaxed`]. `failure` may only be [`SeqCst`], [`Acquire`] or
+ /// [`Relaxed`], and must not be stronger than `success`.
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ ///
+ /// assert_eq!(flag.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed),
+ /// Ok(true));
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ ///
+ /// assert_eq!(flag.compare_exchange(true, true, Ordering::SeqCst, Ordering::Acquire),
+ /// Err(false));
+ /// assert_eq!(flag.load(Ordering::Relaxed), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn compare_exchange(
+ &self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<bool, bool> {
+ // SAFETY: the pointer comes from a live reference and the atomic
+ // intrinsic rules out data races.
+ let res = unsafe {
+ atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
+ };
+ // Translate the raw `u8` payloads back into `bool`s on both arms.
+ res.map(|prev| prev != 0).map_err(|prev| prev != 0)
+ }
+
+ /// Stores `new` into the [`bool`] when the current value equals `current`,
+ /// but is allowed to fail spuriously even when the comparison holds.
+ ///
+ /// The spurious-failure allowance lets some platforms emit tighter code
+ /// than [`AtomicBool::compare_exchange`], which makes this variant the
+ /// usual building block for CAS loops. The `Ok`/`Err` payload is the
+ /// previously stored value either way.
+ ///
+ /// `success` orders the read-modify-write when the exchange happens;
+ /// `failure` orders the load when it does not. Using [`Acquire`] as the
+ /// success ordering makes the store part [`Relaxed`], and [`Release`]
+ /// makes the successful load [`Relaxed`]. `failure` may only be
+ /// [`SeqCst`], [`Acquire`] or [`Relaxed`], and must not be stronger than
+ /// `success`.
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(false);
+ ///
+ /// let mut seen = flag.load(Ordering::Relaxed);
+ /// loop {
+ /// match flag.compare_exchange_weak(seen, true, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(cur) => seen = cur,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn compare_exchange_weak(
+ &self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<bool, bool> {
+ // SAFETY: the pointer comes from a live reference and the atomic
+ // intrinsic rules out data races.
+ let res = unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
+ };
+ // Translate the raw `u8` payloads back into `bool`s on both arms.
+ res.map(|prev| prev != 0).map_err(|prev| prev != 0)
+ }
+
+ /// Logical "and" with a boolean value.
+ ///
+ /// Atomically replaces the stored value with `old & val` and returns the
+ /// value that was stored before the operation.
+ ///
+ /// Every [`Ordering`] mode is accepted. [`Acquire`] leaves the store half
+ /// of this read-modify-write [`Relaxed`], and [`Release`] leaves the load
+ /// half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_and(false, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_and(true, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ ///
+ /// let flag = AtomicBool::new(false);
+ /// assert_eq!(flag.fetch_and(false, Ordering::SeqCst), false);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: the atomic intrinsic rules out data races.
+ let prev = unsafe { atomic_and(self.v.get(), val as u8, order) };
+ prev != 0
+ }
+
+ /// Logical "nand" with a boolean value.
+ ///
+ /// Atomically replaces the stored value with `!(old & val)` and returns
+ /// the value that was stored before the operation.
+ ///
+ /// Every [`Ordering`] mode is accepted. [`Acquire`] leaves the store half
+ /// of this read-modify-write [`Relaxed`], and [`Release`] leaves the load
+ /// half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_nand(false, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_nand(true, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ ///
+ /// let flag = AtomicBool::new(false);
+ /// assert_eq!(flag.fetch_nand(false, Ordering::SeqCst), false);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
+ // The generic `atomic_nand` works on the whole `u8`, which would set
+ // the upper seven bits and leave an invalid `bool` representation
+ // behind. Emulate the operation with primitives that keep the byte at
+ // 0 or 1 instead.
+ match val {
+ // !(x & true) == !x: flip the stored value.
+ true => self.fetch_xor(true, order),
+ // !(x & false) == true: unconditionally store `true`.
+ false => self.swap(true, order),
+ }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Atomically replaces the stored value with `old | val` and returns the
+ /// value that was stored before the operation.
+ ///
+ /// Every [`Ordering`] mode is accepted. [`Acquire`] leaves the store half
+ /// of this read-modify-write [`Relaxed`], and [`Release`] leaves the load
+ /// half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_or(false, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_or(true, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ ///
+ /// let flag = AtomicBool::new(false);
+ /// assert_eq!(flag.fetch_or(false, Ordering::SeqCst), false);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: the atomic intrinsic rules out data races.
+ let prev = unsafe { atomic_or(self.v.get(), val as u8, order) };
+ prev != 0
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Atomically replaces the stored value with `old ^ val` and returns the
+ /// value that was stored before the operation.
+ ///
+ /// Every [`Ordering`] mode is accepted. [`Acquire`] leaves the store half
+ /// of this read-modify-write [`Relaxed`], and [`Release`] leaves the load
+ /// half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_xor(false, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), true);
+ ///
+ /// let flag = AtomicBool::new(true);
+ /// assert_eq!(flag.fetch_xor(true, Ordering::SeqCst), true);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ ///
+ /// let flag = AtomicBool::new(false);
+ /// assert_eq!(flag.fetch_xor(false, Ordering::SeqCst), false);
+ /// assert_eq!(flag.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ // SAFETY: the atomic intrinsic rules out data races.
+ let prev = unsafe { atomic_xor(self.v.get(), val as u8, order) };
+ prev != 0
+ }
+
+ /// Returns a raw mutable pointer to the underlying [`bool`].
+ ///
+ /// Mainly intended for FFI, where a function signature may use
+ /// `*mut bool` where Rust code would take `&AtomicBool`.
+ ///
+ /// Handing out the pointer from a shared reference is fine — atomics are
+ /// interior-mutable, and every safe modification already goes through a
+ /// shared reference via atomic operations. The same restriction carries
+ /// over to the raw pointer: any use of it requires `unsafe`, and accesses
+ /// through it must themselves be atomic; plain reads and writes can be a
+ /// data race.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// # fn main() {
+ /// use std::sync::atomic::AtomicBool;
+ /// extern {
+ /// fn my_atomic_op(arg: *mut bool);
+ /// }
+ ///
+ /// let mut atomic = AtomicBool::new(true);
+ /// unsafe {
+ /// my_atomic_op(atomic.as_mut_ptr());
+ /// }
+ /// # }
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut bool {
+ // The byte behind the cell is always 0 or 1, i.e. a valid `bool`.
+ self.v.get().cast::<bool>()
+ }
+
+ /// Fetches the value and applies a closure that may produce a new one.
+ /// Returns `Ok(previous_value)` when the closure returned `Some(_)` and
+ /// the swap went through, `Err(previous_value)` when it returned `None`.
+ ///
+ /// Note: the closure may run several times if other threads change the
+ /// value concurrently (for as long as it keeps returning `Some(_)`), but
+ /// it will have been applied to the stored value only once.
+ ///
+ /// `set_order` is the ordering used for the successful read-modify-write;
+ /// `fetch_order` is the ordering used for the loads. They correspond to
+ /// the success and failure orderings of [`AtomicBool::compare_exchange`]
+ /// respectively: [`Acquire`] as `set_order` makes the store part
+ /// [`Relaxed`], [`Release`] makes the final successful load [`Relaxed`],
+ /// and `fetch_order` may only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
+ /// and no stronger than `set_order`.
+ ///
+ /// **Note:** Only present on targets with atomic operations on `u8`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// #![feature(atomic_fetch_update)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let x = AtomicBool::new(false);
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
+ /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
+ /// assert_eq!(x.load(Ordering::SeqCst), false);
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
+ #[cfg(target_has_atomic = "8")]
+ pub fn fetch_update<F>(
+ &self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F,
+ ) -> Result<bool, bool>
+ where
+ F: FnMut(bool) -> Option<bool>,
+ {
+ let mut curr = self.load(fetch_order);
+ // Keep retrying while `f` produces a replacement; a CAS failure hands
+ // back the freshly observed value for the next round.
+ while let Some(next) = f(curr) {
+ match self.compare_exchange_weak(curr, next, set_order, fetch_order) {
+ ok @ Ok(_) => return ok,
+ Err(observed) => curr = observed,
+ }
+ }
+ Err(curr)
+ }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+impl<T> AtomicPtr<T> {
+ /// Creates a new `AtomicPtr` wrapping the raw pointer `p`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// let data = &mut 5;
+ /// let shared = AtomicPtr::new(data);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_atomic_new", since = "1.32.0")]
+ pub const fn new(p: *mut T) -> AtomicPtr<T> {
+ Self { p: UnsafeCell::new(p) }
+ }
+
+ /// Returns a mutable reference to the underlying pointer.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut atomic_ptr = AtomicPtr::new(&mut 10);
+ /// *atomic_ptr.get_mut() = &mut 5;
+ /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ pub fn get_mut(&mut self) -> &mut *mut T {
+ // `&mut self` proves exclusive access, so the safe `UnsafeCell::get_mut`
+ // suffices — no atomics needed here.
+ self.p.get_mut()
+ }
+
+ /// Get atomic access to a pointer.
+ ///
+ /// The returned borrow reuses the lifetime of the `&mut *mut T`, so for
+ /// that lifetime all access to the pointer goes through this atomic view.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut)]
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut some_ptr = &mut 123 as *mut i32;
+ /// let a = AtomicPtr::from_mut(&mut some_ptr);
+ /// a.store(&mut 456, Ordering::Relaxed);
+ /// assert_eq!(unsafe { *some_ptr }, 456);
+ /// ```
+ #[inline]
+ #[cfg(target_has_atomic_equal_alignment = "ptr")]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut *mut T) -> &Self {
+ use crate::mem::align_of;
+ // Compile-time alignment assertion: the `let []` pattern only matches a
+ // zero-length array, and the subtraction underflows when `*mut ()` is
+ // more aligned — so this line builds only when the alignments are equal.
+ let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `*mut T` and `Self` is the same on all platforms
+ // supported by rust, as verified above.
+ unsafe { &*(v as *mut *mut T as *mut Self) }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::AtomicPtr;
+ ///
+ /// let atomic_ptr = AtomicPtr::new(&mut 5);
+ /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
+ /// ```
+ #[inline]
+ #[stable(feature = "atomic_access", since = "1.15.0")]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> *mut T {
+ // By-value `self` means no other thread can still observe the cell.
+ self.p.into_inner()
+ }
+
+ /// Atomically reads the stored pointer.
+ ///
+ /// The [`Ordering`] argument selects the memory ordering of this load.
+ /// Valid choices are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics when `order` is [`Release`] or [`AcqRel`], as those only apply
+ /// to operations that store.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let data = &mut 5;
+ /// let shared = AtomicPtr::new(data);
+ ///
+ /// let value = shared.load(Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> *mut T {
+ // SAFETY: the atomic intrinsic rules out data races; the pointer is
+ // valid because it comes from a reference.
+ let raw = unsafe { atomic_load(self.p.get() as *mut usize, order) };
+ raw as *mut T
+ }
+
+ /// Atomically writes `ptr` into the atomic pointer.
+ ///
+ /// The [`Ordering`] argument selects the memory ordering of this store.
+ /// Valid choices are [`SeqCst`], [`Release`] and [`Relaxed`].
+ ///
+ /// # Panics
+ ///
+ /// Panics when `order` is [`Acquire`] or [`AcqRel`], as those only apply
+ /// to operations that load.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let data = &mut 5;
+ /// let shared = AtomicPtr::new(data);
+ ///
+ /// let replacement = &mut 10;
+ ///
+ /// shared.store(replacement, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, ptr: *mut T, order: Ordering) {
+ // SAFETY: the atomic intrinsic rules out data races; the destination
+ // pointer is valid because it comes from a reference.
+ unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order) }
+ }
+
+ /// Atomically replaces the stored pointer with `ptr`, returning what was
+ /// there before.
+ ///
+ /// `swap` accepts every [`Ordering`] mode. Bear in mind that [`Acquire`]
+ /// leaves the store half of this read-modify-write [`Relaxed`], while
+ /// [`Release`] leaves the load half [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with pointer-sized atomic operations.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let data = &mut 5;
+ /// let shared = AtomicPtr::new(data);
+ ///
+ /// let replacement = &mut 10;
+ ///
+ /// let previous = shared.swap(replacement, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
+ // SAFETY: the atomic intrinsic rules out data races.
+ let old = unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) };
+ old as *mut T
+ }
+
+ /// Stores `new` into the pointer when the current value equals `current`.
+ ///
+ /// The previous value is returned unconditionally; the store took place
+ /// exactly when that return value equals `current`.
+ ///
+ /// `order` describes the memory ordering of the read-modify-write. Even
+ /// with [`AcqRel`], a failed comparison performs only an `Acquire` load
+ /// and carries no `Release` semantics. [`Acquire`] leaves the store part
+ /// (if any) [`Relaxed`], and [`Release`] leaves the load part [`Relaxed`].
+ ///
+ /// **Note:** Only present on targets with pointer-sized atomic operations.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let shared = AtomicPtr::new(ptr);
+ ///
+ /// let replacement = &mut 10;
+ ///
+ /// let previous = shared.compare_and_swap(ptr, replacement, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
+ // Success and failure both carry the previous value, so collapse the
+ // `Result` from `compare_exchange` either way.
+ self.compare_exchange(current, new, order, strongest_failure_ordering(order))
+ .unwrap_or_else(|prev| prev)
+ }
+
+ /// Stores `new` into the pointer when the current value equals `current`,
+ /// reporting the outcome as a `Result`.
+ ///
+ /// `Ok(prev)` means the exchange happened and `prev` is guaranteed to
+ /// equal `current`; `Err(prev)` means it did not, and `prev` is the value
+ /// that was actually observed.
+ ///
+ /// Two [`Ordering`] arguments control the memory ordering: `success`
+ /// applies to the read-modify-write when the exchange happens, `failure`
+ /// to the load when it does not. Using [`Acquire`] as success ordering
+ /// makes the store part [`Relaxed`], and [`Release`] makes the successful
+ /// load [`Relaxed`]. `failure` may only be [`SeqCst`], [`Acquire`] or
+ /// [`Relaxed`], and must not be stronger than `success`.
+ ///
+ /// **Note:** Only present on targets with pointer-sized atomic operations.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let ptr = &mut 5;
+ /// let shared = AtomicPtr::new(ptr);
+ ///
+ /// let replacement = &mut 10;
+ ///
+ /// let outcome = shared.compare_exchange(ptr, replacement,
+ /// Ordering::SeqCst, Ordering::Relaxed);
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ pub fn compare_exchange(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ // SAFETY: the atomic intrinsic rules out data races; the pointer is
+ // valid because it comes from a reference.
+ let res = unsafe {
+ atomic_compare_exchange(
+ self.p.get() as *mut usize,
+ current as usize,
+ new as usize,
+ success,
+ failure,
+ )
+ };
+ // Translate the raw `usize` payloads back into pointers on both arms.
+ res.map(|prev| prev as *mut T).map_err(|prev| prev as *mut T)
+ }
+
+ /// Stores `new` into the pointer when the current value equals `current`,
+ /// but is allowed to fail spuriously even when the comparison holds.
+ ///
+ /// The spurious-failure allowance lets some platforms emit tighter code
+ /// than [`AtomicPtr::compare_exchange`], which makes this variant the
+ /// usual building block for CAS loops. The `Ok`/`Err` payload is the
+ /// previously stored value either way.
+ ///
+ /// `success` orders the read-modify-write when the exchange happens;
+ /// `failure` orders the load when it does not. Using [`Acquire`] as the
+ /// success ordering makes the store part [`Relaxed`], and [`Release`]
+ /// makes the successful load [`Relaxed`]. `failure` may only be
+ /// [`SeqCst`], [`Acquire`] or [`Relaxed`], and must not be stronger than
+ /// `success`.
+ ///
+ /// **Note:** Only present on targets with pointer-sized atomic operations.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let shared = AtomicPtr::new(&mut 5);
+ ///
+ /// let replacement = &mut 10;
+ /// let mut seen = shared.load(Ordering::Relaxed);
+ /// loop {
+ /// match shared.compare_exchange_weak(seen, replacement, Ordering::SeqCst, Ordering::Relaxed) {
+ /// Ok(_) => break,
+ /// Err(cur) => seen = cur,
+ /// }
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ #[cfg(target_has_atomic = "ptr")]
+ pub fn compare_exchange_weak(
+ &self,
+ current: *mut T,
+ new: *mut T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<*mut T, *mut T> {
+ // SAFETY: the atomic intrinsic rules out data races; the pointer is
+ // valid because it comes from a reference.
+ let res = unsafe {
+ atomic_compare_exchange_weak(
+ self.p.get() as *mut usize,
+ current as usize,
+ new as usize,
+ success,
+ failure,
+ )
+ };
+ // Translate the raw `usize` payloads back into pointers on both arms.
+ res.map(|prev| prev as *mut T).map_err(|prev| prev as *mut T)
+ }
+
+    /// Fetches the value, and applies a function to it that returns an optional
+    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
+    /// returned `Some(_)`, else `Err(previous_value)`.
+    ///
+    /// Note: This may call the function multiple times if the value has been
+    /// changed from other threads in the meantime, as long as the function
+    /// returns `Some(_)`, but the function will have been applied only once to
+    /// the stored value.
+    ///
+    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
+    /// ordering of this operation. The first describes the required ordering for
+    /// when the operation finally succeeds while the second describes the
+    /// required ordering for loads. These correspond to the success and failure
+    /// orderings of [`AtomicPtr::compare_exchange`] respectively.
+    ///
+    /// Using [`Acquire`] as success ordering makes the store part of this
+    /// operation [`Relaxed`], and using [`Release`] makes the final successful
+    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
+    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
+    /// success ordering.
+    ///
+    /// **Note:** This method is only available on platforms that support atomic
+    /// operations on pointers.
+    ///
+    /// # Examples
+    ///
+    /// ```rust
+    /// #![feature(atomic_fetch_update)]
+    /// use std::sync::atomic::{AtomicPtr, Ordering};
+    ///
+    /// let ptr: *mut _ = &mut 5;
+    /// let some_ptr = AtomicPtr::new(ptr);
+    ///
+    /// let new: *mut _ = &mut 10;
+    /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
+    /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
+    ///     if x == ptr {
+    ///         Some(new)
+    ///     } else {
+    ///         None
+    ///     }
+    /// });
+    /// assert_eq!(result, Ok(ptr));
+    /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
+    /// ```
+    #[inline]
+    #[unstable(feature = "atomic_fetch_update", reason = "recently added", issue = "78639")]
+    #[cfg(target_has_atomic = "ptr")]
+    pub fn fetch_update<F>(
+        &self,
+        set_order: Ordering,
+        fetch_order: Ordering,
+        mut f: F,
+    ) -> Result<*mut T, *mut T>
+    where
+        F: FnMut(*mut T) -> Option<*mut T>,
+    {
+        // Classic CAS loop: re-run `f` on every failed exchange with the
+        // freshly observed value, and bail out with `Err` as soon as `f`
+        // declines to produce a replacement.
+        let mut curr = self.load(fetch_order);
+        loop {
+            let next = match f(curr) {
+                Some(next) => next,
+                None => return Err(curr),
+            };
+            match self.compare_exchange_weak(curr, next, set_order, fetch_order) {
+                Ok(prev) => return Ok(prev),
+                Err(observed) => curr = observed,
+            }
+        }
+    }
+}
+
+#[cfg(target_has_atomic_load_store = "8")]
+#[stable(feature = "atomic_bool_from", since = "1.24.0")]
+impl From<bool> for AtomicBool {
+    /// Converts a `bool` into an `AtomicBool`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::atomic::AtomicBool;
+    /// let atomic_bool = AtomicBool::from(true);
+    /// assert_eq!(format!("{:?}", atomic_bool), "true")
+    /// ```
+    #[inline]
+    fn from(b: bool) -> Self {
+        AtomicBool::new(b)
+    }
+}
+
+#[cfg(target_has_atomic_load_store = "ptr")]
+#[stable(feature = "atomic_from", since = "1.23.0")]
+impl<T> From<*mut T> for AtomicPtr<T> {
+ /// Converts a `*mut T` into an `AtomicPtr<T>`.
+ #[inline]
+ fn from(p: *mut T) -> Self {
+ Self::new(p)
+ }
+}
+
+// Helper macro: expands to the empty string for the 8-bit integer types
+// (`u8`/`i8`) and to its remaining arguments for every other type. Used by
+// `atomic_int!` to splice alignment notes into the generated docs only where
+// the alignment requirement is non-trivial.
+#[allow(unused_macros)] // This macro ends up being unused on some architectures.
+macro_rules! if_not_8_bit {
+ (u8, $($tt:tt)*) => { "" };
+ (i8, $($tt:tt)*) => { "" };
+ ($_:ident, $($tt:tt)*) => { $($tt)* };
+}
+
+// Generates one atomic integer type: the `$atomic_type` struct wrapping an
+// `UnsafeCell<$int_type>`, the deprecated `$atomic_init` constant, the
+// Default/From/Debug/Sync impls, and the full method surface. The `$stable*`
+// and `$const_stable` metas carry per-item stability attributes; `$cfg_cas`
+// gates the CAS/RMW methods, and `$cfg_align` gates `from_mut` on targets
+// where the atomic type and the plain integer have the same alignment.
+#[cfg(target_has_atomic_load_store = "8")]
+macro_rules! atomic_int {
+ ($cfg_cas:meta,
+ $cfg_align:meta,
+ $stable:meta,
+ $stable_cxchg:meta,
+ $stable_debug:meta,
+ $stable_access:meta,
+ $stable_from:meta,
+ $stable_nand:meta,
+ $const_stable:meta,
+ $stable_init_const:meta,
+ $s_int_type:literal, $int_ref:expr,
+ $extra_feature:expr,
+ $min_fn:ident, $max_fn:ident,
+ $align:expr,
+ $atomic_new:expr,
+ $int_type:ident $atomic_type:ident $atomic_init:ident) => {
+ /// An integer type which can be safely shared between threads.
+ ///
+ /// This type has the same in-memory representation as the underlying
+ /// integer type, [`
+ #[doc = $s_int_type]
+ /// `](
+ #[doc = $int_ref]
+ /// ). For more about the differences between atomic types and
+ /// non-atomic types as well as information about the portability of
+ /// this type, please see the [module-level documentation].
+ ///
+ /// **Note:** This type is only available on platforms that support
+ /// atomic loads and stores of [`
+ #[doc = $s_int_type]
+ /// `](
+ #[doc = $int_ref]
+ /// ).
+ ///
+ /// [module-level documentation]: crate::sync::atomic
+ #[$stable]
+ #[repr(C, align($align))]
+ pub struct $atomic_type {
+ v: UnsafeCell<$int_type>,
+ }
+
+ /// An atomic integer initialized to `0`.
+ #[$stable_init_const]
+ #[rustc_deprecated(
+ since = "1.34.0",
+ reason = "the `new` function is now preferred",
+ suggestion = $atomic_new,
+ )]
+ pub const $atomic_init: $atomic_type = $atomic_type::new(0);
+
+ #[$stable]
+ impl Default for $atomic_type {
+ #[inline]
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+ }
+
+ #[$stable_from]
+ impl From<$int_type> for $atomic_type {
+ doc_comment! {
+ concat!(
+"Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`."),
+ #[inline]
+ fn from(v: $int_type) -> Self { Self::new(v) }
+ }
+ }
+
+ #[$stable_debug]
+ impl fmt::Debug for $atomic_type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
+ }
+ }
+
+ // Send is implicitly implemented.
+ #[$stable]
+ unsafe impl Sync for $atomic_type {}
+
+ impl $atomic_type {
+ doc_comment! {
+ concat!("Creates a new atomic integer.
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
+
+let atomic_forty_two = ", stringify!($atomic_type), "::new(42);
+```"),
+ #[inline]
+ #[$stable]
+ #[$const_stable]
+ pub const fn new(v: $int_type) -> Self {
+ Self {v: UnsafeCell::new(v)}
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns a mutable reference to the underlying integer.
+
+This is safe because the mutable reference guarantees that no other threads are
+concurrently accessing the atomic data.
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let mut some_var = ", stringify!($atomic_type), "::new(10);
+assert_eq!(*some_var.get_mut(), 10);
+*some_var.get_mut() = 5;
+assert_eq!(some_var.load(Ordering::SeqCst), 5);
+```"),
+ #[inline]
+ #[$stable_access]
+ pub fn get_mut(&mut self) -> &mut $int_type {
+ self.v.get_mut()
+ }
+ }
+
+ doc_comment! {
+ concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.
+
+",
+if_not_8_bit! {
+ $int_type,
+ concat!(
+ "**Note:** This function is only available on targets where `",
+ stringify!($int_type), "` has an alignment of ", $align, " bytes."
+ )
+},
+"
+
+# Examples
+
+```
+#![feature(atomic_from_mut)]
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let mut some_int = 123;
+let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);
+a.store(100, Ordering::Relaxed);
+assert_eq!(some_int, 100);
+```
+ "),
+ #[inline]
+ #[$cfg_align]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn from_mut(v: &mut $int_type) -> &Self {
+ use crate::mem::align_of;
+ // Compile-time alignment guard: the `[]` pattern only matches a
+ // zero-length array, so this errors unless the two alignments are
+ // equal (a larger `$int_type` alignment would underflow instead).
+ let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
+ // SAFETY:
+ // - the mutable reference guarantees unique ownership.
+ // - the alignment of `$int_type` and `Self` is the
+ // same, as promised by $cfg_align and verified above.
+ unsafe { &*(v as *mut $int_type as *mut Self) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Consumes the atomic and returns the contained value.
+
+This is safe because passing `self` by value guarantees that no other threads are
+concurrently accessing the atomic data.
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+assert_eq!(some_var.into_inner(), 5);
+```"),
+ #[inline]
+ #[$stable_access]
+ #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
+ pub const fn into_inner(self) -> $int_type {
+ self.v.into_inner()
+ }
+ }
+
+ doc_comment! {
+ concat!("Loads a value from the atomic integer.
+
+`load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
+Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
+
+# Panics
+
+Panics if `order` is [`Release`] or [`AcqRel`].
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+
+assert_eq!(some_var.load(Ordering::Relaxed), 5);
+```"),
+ #[inline]
+ #[$stable]
+ pub fn load(&self, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_load(self.v.get(), order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Stores a value into the atomic integer.
+
+`store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
+ Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
+
+# Panics
+
+Panics if `order` is [`Acquire`] or [`AcqRel`].
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+
+some_var.store(10, Ordering::Relaxed);
+assert_eq!(some_var.load(Ordering::Relaxed), 10);
+```"),
+ #[inline]
+ #[$stable]
+ pub fn store(&self, val: $int_type, order: Ordering) {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_store(self.v.get(), val, order); }
+ }
+ }
+
+ doc_comment! {
+ concat!("Stores a value into the atomic integer, returning the previous value.
+
+`swap` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+
+assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_swap(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Stores a value into the atomic integer if the current value is the same as
+the `current` value.
+
+The return value is always the previous value. If it is equal to `current`, then the
+value was updated.
+
+`compare_and_swap` also takes an [`Ordering`] argument which describes the memory
+ordering of this operation. Notice that even when using [`AcqRel`], the operation
+might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
+Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
+happens, and using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+
+assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
+assert_eq!(some_var.load(Ordering::Relaxed), 10);
+
+assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
+assert_eq!(some_var.load(Ordering::Relaxed), 10);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn compare_and_swap(&self,
+ current: $int_type,
+ new: $int_type,
+ order: Ordering) -> $int_type {
+ // Implemented on top of `compare_exchange`, collapsing the
+ // Ok/Err distinction since this API only reports the prior value.
+ match self.compare_exchange(current,
+ new,
+ order,
+ strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Stores a value into the atomic integer if the current value is the same as
+the `current` value.
+
+The return value is a result indicating whether the new value was written and
+containing the previous value. On success this value is guaranteed to be equal to
+`current`.
+
+`compare_exchange` takes two [`Ordering`] arguments to describe the memory
+ordering of this operation. The first describes the required ordering if the
+operation succeeds while the second describes the required ordering when the
+operation fails. Using [`Acquire`] as success ordering makes the store part
+of this operation [`Relaxed`], and using [`Release`] makes the successful load
+[`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
+and must be equivalent to or weaker than the success ordering.
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let some_var = ", stringify!($atomic_type), "::new(5);
+
+assert_eq!(some_var.compare_exchange(5, 10,
+ Ordering::Acquire,
+ Ordering::Relaxed),
+ Ok(5));
+assert_eq!(some_var.load(Ordering::Relaxed), 10);
+
+assert_eq!(some_var.compare_exchange(6, 12,
+ Ordering::SeqCst,
+ Ordering::Acquire),
+ Err(10));
+assert_eq!(some_var.load(Ordering::Relaxed), 10);
+```"),
+ #[inline]
+ #[$stable_cxchg]
+ #[$cfg_cas]
+ pub fn compare_exchange(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Stores a value into the atomic integer if the current value is the same as
+the `current` value.
+
+Unlike [`", stringify!($atomic_type), "::compare_exchange`], this function is allowed to spuriously fail even
+when the comparison succeeds, which can result in more efficient code on some
+platforms. The return value is a result indicating whether the new value was
+written and containing the previous value.
+
+`compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
+ordering of this operation. The first describes the required ordering if the
+operation succeeds while the second describes the required ordering when the
+operation fails. Using [`Acquire`] as success ordering makes the store part
+of this operation [`Relaxed`], and using [`Release`] makes the successful load
+[`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
+and must be equivalent to or weaker than the success ordering.
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let val = ", stringify!($atomic_type), "::new(4);
+
+let mut old = val.load(Ordering::Relaxed);
+loop {
+ let new = old * 2;
+ match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
+ Ok(_) => break,
+ Err(x) => old = x,
+ }
+}
+```"),
+ #[inline]
+ #[$stable_cxchg]
+ #[$cfg_cas]
+ pub fn compare_exchange_weak(&self,
+ current: $int_type,
+ new: $int_type,
+ success: Ordering,
+ failure: Ordering) -> Result<$int_type, $int_type> {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe {
+ atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
+ }
+ }
+ }
+
+ doc_comment! {
+ concat!("Adds to the current value, returning the previous value.
+
+This operation wraps around on overflow.
+
+`fetch_add` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(0);
+assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
+assert_eq!(foo.load(Ordering::SeqCst), 10);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_add(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Subtracts from the current value, returning the previous value.
+
+This operation wraps around on overflow.
+
+`fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(20);
+assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
+assert_eq!(foo.load(Ordering::SeqCst), 10);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_sub(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Bitwise \"and\" with the current value.
+
+Performs a bitwise \"and\" operation on the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_and` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(0b101101);
+assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
+assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_and(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Bitwise \"nand\" with the current value.
+
+Performs a bitwise \"nand\" operation on the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "
+use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(0x13);
+assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
+assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
+```"),
+ #[inline]
+ #[$stable_nand]
+ #[$cfg_cas]
+ pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_nand(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Bitwise \"or\" with the current value.
+
+Performs a bitwise \"or\" operation on the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_or` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(0b101101);
+assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
+assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_or(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Bitwise \"xor\" with the current value.
+
+Performs a bitwise \"xor\" operation on the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(0b101101);
+assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
+assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
+```"),
+ #[inline]
+ #[$stable]
+ #[$cfg_cas]
+ pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { atomic_xor(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Fetches the value, and applies a function to it that returns an optional
+new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
+`Err(previous_value)`.
+
+Note: This may call the function multiple times if the value has been changed from other threads in
+the meantime, as long as the function returns `Some(_)`, but the function will have been applied
+only once to the stored value.
+
+`fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
+The first describes the required ordering for when the operation finally succeeds while the second
+describes the required ordering for loads. These correspond to the success and failure orderings of
+[`", stringify!($atomic_type), "::compare_exchange`] respectively.
+
+Using [`Acquire`] as success ordering makes the store part
+of this operation [`Relaxed`], and using [`Release`] makes the final successful load
+[`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
+and must be equivalent to or weaker than the success ordering.
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```rust
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let x = ", stringify!($atomic_type), "::new(7);
+assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
+assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
+assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
+assert_eq!(x.load(Ordering::SeqCst), 9);
+```"),
+ #[inline]
+ #[stable(feature = "no_more_cas", since = "1.45.0")]
+ #[$cfg_cas]
+ pub fn fetch_update<F>(&self,
+ set_order: Ordering,
+ fetch_order: Ordering,
+ mut f: F) -> Result<$int_type, $int_type>
+ where F: FnMut($int_type) -> Option<$int_type> {
+ // CAS loop: retry with the freshly observed value until the weak
+ // exchange succeeds or `f` returns `None`.
+ let mut prev = self.load(fetch_order);
+ while let Some(next) = f(prev) {
+ match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
+ x @ Ok(_) => return x,
+ Err(next_prev) => prev = next_prev
+ }
+ }
+ Err(prev)
+ }
+ }
+
+ doc_comment! {
+ concat!("Maximum with the current value.
+
+Finds the maximum of the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_max` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(23);
+assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
+assert_eq!(foo.load(Ordering::SeqCst), 42);
+```
+
+If you want to obtain the maximum value in one step, you can use the following:
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(23);
+let bar = 42;
+let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
+assert!(max_foo == 42);
+```"),
+ #[inline]
+ #[stable(feature = "atomic_min_max", since = "1.45.0")]
+ #[$cfg_cas]
+ pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
+ // `$max_fn` is the signed or unsigned max intrinsic, chosen by the
+ // macro invocation to match `$int_type`'s signedness.
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { $max_fn(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Minimum with the current value.
+
+Finds the minimum of the current value and the argument `val`, and
+sets the new value to the result.
+
+Returns the previous value.
+
+`fetch_min` takes an [`Ordering`] argument which describes the memory ordering
+of this operation. All ordering modes are possible. Note that using
+[`Acquire`] makes the store part of this operation [`Relaxed`], and
+using [`Release`] makes the load part [`Relaxed`].
+
+**Note**: This method is only available on platforms that support atomic
+operations on [`", $s_int_type, "`](", $int_ref, ").
+
+# Examples
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(23);
+assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
+assert_eq!(foo.load(Ordering::Relaxed), 23);
+assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
+assert_eq!(foo.load(Ordering::Relaxed), 22);
+```
+
+If you want to obtain the minimum value in one step, you can use the following:
+
+```
+", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};
+
+let foo = ", stringify!($atomic_type), "::new(23);
+let bar = 12;
+let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
+assert_eq!(min_foo, 12);
+```"),
+ #[inline]
+ #[stable(feature = "atomic_min_max", since = "1.45.0")]
+ #[$cfg_cas]
+ pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
+ // `$min_fn` is the signed or unsigned min intrinsic, chosen by the
+ // macro invocation to match `$int_type`'s signedness.
+ // SAFETY: data races are prevented by atomic intrinsics.
+ unsafe { $min_fn(self.v.get(), val, order) }
+ }
+ }
+
+ doc_comment! {
+ concat!("Returns a mutable pointer to the underlying integer.
+
+Doing non-atomic reads and writes on the resulting integer can be a data race.
+This method is mostly useful for FFI, where the function signature may use
+`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
+
+Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+atomic types work with interior mutability. All modifications of an atomic change the value
+through a shared reference, and can do so safely as long as they use atomic operations. Any
+use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+restriction: operations on it must be atomic.
+
+# Examples
+
+```ignore (extern-declaration)
+# fn main() {
+", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
+
+extern {
+ fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
+}
+
+let mut atomic = ", stringify!($atomic_type), "::new(1);
+",
+// SAFETY: Safe as long as `my_atomic_op` is atomic.
+"unsafe {
+ my_atomic_op(atomic.as_mut_ptr());
+}
+# }
+```"),
+ #[inline]
+ #[unstable(feature = "atomic_mut_ptr",
+ reason = "recently added",
+ issue = "66893")]
+ pub fn as_mut_ptr(&self) -> *mut $int_type {
+ self.v.get()
+ }
+ }
+ }
+ }
+}
+
+// Instantiations of `atomic_int!`. Arguments are positional and follow the
+// macro's parameter list: cfg gates, stability metas, the doc strings and
+// links, the min/max intrinsic pair (signed vs. unsigned), the required
+// alignment in bytes, the deprecation suggestion, and the type/const names.
+#[cfg(target_has_atomic_load_store = "8")]
+atomic_int! {
+ cfg(target_has_atomic = "8"),
+ cfg(target_has_atomic_equal_alignment = "8"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i8", "../../../std/primitive.i8.html",
+ "",
+ atomic_min, atomic_max,
+ 1,
+ "AtomicI8::new(0)",
+ i8 AtomicI8 ATOMIC_I8_INIT
+}
+#[cfg(target_has_atomic_load_store = "8")]
+atomic_int! {
+ cfg(target_has_atomic = "8"),
+ cfg(target_has_atomic_equal_alignment = "8"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u8", "../../../std/primitive.u8.html",
+ "",
+ atomic_umin, atomic_umax,
+ 1,
+ "AtomicU8::new(0)",
+ u8 AtomicU8 ATOMIC_U8_INIT
+}
+#[cfg(target_has_atomic_load_store = "16")]
+atomic_int! {
+ cfg(target_has_atomic = "16"),
+ cfg(target_has_atomic_equal_alignment = "16"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i16", "../../../std/primitive.i16.html",
+ "",
+ atomic_min, atomic_max,
+ 2,
+ "AtomicI16::new(0)",
+ i16 AtomicI16 ATOMIC_I16_INIT
+}
+#[cfg(target_has_atomic_load_store = "16")]
+atomic_int! {
+ cfg(target_has_atomic = "16"),
+ cfg(target_has_atomic_equal_alignment = "16"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u16", "../../../std/primitive.u16.html",
+ "",
+ atomic_umin, atomic_umax,
+ 2,
+ "AtomicU16::new(0)",
+ u16 AtomicU16 ATOMIC_U16_INIT
+}
+#[cfg(target_has_atomic_load_store = "32")]
+atomic_int! {
+ cfg(target_has_atomic = "32"),
+ cfg(target_has_atomic_equal_alignment = "32"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i32", "../../../std/primitive.i32.html",
+ "",
+ atomic_min, atomic_max,
+ 4,
+ "AtomicI32::new(0)",
+ i32 AtomicI32 ATOMIC_I32_INIT
+}
+#[cfg(target_has_atomic_load_store = "32")]
+atomic_int! {
+ cfg(target_has_atomic = "32"),
+ cfg(target_has_atomic_equal_alignment = "32"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u32", "../../../std/primitive.u32.html",
+ "",
+ atomic_umin, atomic_umax,
+ 4,
+ "AtomicU32::new(0)",
+ u32 AtomicU32 ATOMIC_U32_INIT
+}
+#[cfg(target_has_atomic_load_store = "64")]
+atomic_int! {
+ cfg(target_has_atomic = "64"),
+ cfg(target_has_atomic_equal_alignment = "64"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i64", "../../../std/primitive.i64.html",
+ "",
+ atomic_min, atomic_max,
+ 8,
+ "AtomicI64::new(0)",
+ i64 AtomicI64 ATOMIC_I64_INIT
+}
+#[cfg(target_has_atomic_load_store = "64")]
+atomic_int! {
+ cfg(target_has_atomic = "64"),
+ cfg(target_has_atomic_equal_alignment = "64"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ stable(feature = "integer_atomics_stable", since = "1.34.0"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u64", "../../../std/primitive.u64.html",
+ "",
+ atomic_umin, atomic_umax,
+ 8,
+ "AtomicU64::new(0)",
+ u64 AtomicU64 ATOMIC_U64_INIT
+}
+#[cfg(target_has_atomic_load_store = "128")]
+atomic_int! {
+ cfg(target_has_atomic = "128"),
+ cfg(target_has_atomic_equal_alignment = "128"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "i128", "../../../std/primitive.i128.html",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_min, atomic_max,
+ 16,
+ "AtomicI128::new(0)",
+ i128 AtomicI128 ATOMIC_I128_INIT
+}
+#[cfg(target_has_atomic_load_store = "128")]
+atomic_int! {
+ cfg(target_has_atomic = "128"),
+ cfg(target_has_atomic_equal_alignment = "128"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+ unstable(feature = "integer_atomics", issue = "32976"),
+ "u128", "../../../std/primitive.u128.html",
+ "#![feature(integer_atomics)]\n\n",
+ atomic_umin, atomic_umax,
+ 16,
+ "AtomicU128::new(0)",
+ u128 AtomicU128 ATOMIC_U128_INIT
+}
+
// Helper macro: instantiates `AtomicIsize` and `AtomicUsize` with the
// alignment matching the target's pointer width. Each
// `$target_pointer_width $align` pair expands to both the signed and the
// unsigned pointer-sized atomic, each gated on
// `#[cfg(target_pointer_width = ...)]` so that exactly one pair survives
// compilation for any given target.
macro_rules! atomic_int_ptr_sized {
    ( $($target_pointer_width:literal $align:literal)* ) => { $(
        #[cfg(target_has_atomic_load_store = "ptr")]
        #[cfg(target_pointer_width = $target_pointer_width)]
        atomic_int! {
            cfg(target_has_atomic = "ptr"),
            cfg(target_has_atomic_equal_alignment = "ptr"),
            stable(feature = "rust1", since = "1.0.0"),
            stable(feature = "extended_compare_and_swap", since = "1.10.0"),
            stable(feature = "atomic_debug", since = "1.3.0"),
            stable(feature = "atomic_access", since = "1.15.0"),
            stable(feature = "atomic_from", since = "1.23.0"),
            stable(feature = "atomic_nand", since = "1.27.0"),
            rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
            stable(feature = "rust1", since = "1.0.0"),
            "isize", "../../../std/primitive.isize.html",
            "",
            atomic_min, atomic_max,
            $align,
            "AtomicIsize::new(0)",
            isize AtomicIsize ATOMIC_ISIZE_INIT
        }
        #[cfg(target_has_atomic_load_store = "ptr")]
        #[cfg(target_pointer_width = $target_pointer_width)]
        atomic_int! {
            cfg(target_has_atomic = "ptr"),
            cfg(target_has_atomic_equal_alignment = "ptr"),
            stable(feature = "rust1", since = "1.0.0"),
            stable(feature = "extended_compare_and_swap", since = "1.10.0"),
            stable(feature = "atomic_debug", since = "1.3.0"),
            stable(feature = "atomic_access", since = "1.15.0"),
            stable(feature = "atomic_from", since = "1.23.0"),
            stable(feature = "atomic_nand", since = "1.27.0"),
            rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
            stable(feature = "rust1", since = "1.0.0"),
            "usize", "../../../std/primitive.usize.html",
            "",
            atomic_umin, atomic_umax,
            $align,
            "AtomicUsize::new(0)",
            usize AtomicUsize ATOMIC_USIZE_INIT
        }
    )* };
}
+
// One `(pointer-width, alignment-in-bytes)` pair per supported target
// pointer width; the macro above selects the matching pair via `cfg`.
atomic_int_ptr_sized! {
    "16" 2
    "32" 4
    "64" 8
}
+
/// Maps a compare-exchange *success* ordering to the strongest *failure*
/// ordering legal for it: any acquire component is kept, while release
/// components are dropped (a failed compare-exchange performs no store,
/// so release semantics cannot apply to it).
#[inline]
#[cfg(target_has_atomic = "8")]
fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        SeqCst => SeqCst,
        Acquire | AcqRel => Acquire,
        // `Release` and `Relaxed` both degrade to a relaxed load.
        _ => Relaxed,
    }
}
+
/// Atomic store of `val` into `*dst`, dispatching `order` to the matching
/// monomorphic compiler intrinsic.
///
/// # Panics
///
/// Panics if `order` is `Acquire` or `AcqRel` — stores cannot carry acquire
/// semantics.
#[inline]
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Release => intrinsics::atomic_store_rel(dst, val),
            Relaxed => intrinsics::atomic_store_relaxed(dst, val),
            // The un-suffixed intrinsic is the SeqCst variant.
            SeqCst => intrinsics::atomic_store(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire/release store"),
        }
    }
}
+
/// Atomic load from `*dst`, dispatching `order` to the matching monomorphic
/// compiler intrinsic.
///
/// # Panics
///
/// Panics if `order` is `Release` or `AcqRel` — loads cannot carry release
/// semantics.
#[inline]
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_load_acq(dst),
            Relaxed => intrinsics::atomic_load_relaxed(dst),
            // The un-suffixed intrinsic is the SeqCst variant.
            SeqCst => intrinsics::atomic_load(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire/release load"),
        }
    }
}
+
/// Atomic exchange: stores `val` into `*dst` and passes through the
/// intrinsic's return value (the previous contents, per the `xchg`
/// contract). Dispatches `order` to the matching intrinsic; every ordering
/// is valid for an exchange.
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_xchg_acq(dst, val),
            Release => intrinsics::atomic_xchg_rel(dst, val),
            AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
            Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
            SeqCst => intrinsics::atomic_xchg(dst, val),
        }
    }
}
+
/// Atomic fetch-add of `val` into `*dst`, dispatching `order` to the
/// matching `atomic_xadd_*` intrinsic.
///
/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_xadd_acq(dst, val),
            Release => intrinsics::atomic_xadd_rel(dst, val),
            AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
            Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
            SeqCst => intrinsics::atomic_xadd(dst, val),
        }
    }
}
+
/// Atomic fetch-subtract of `val` from `*dst`, dispatching `order` to the
/// matching `atomic_xsub_*` intrinsic.
///
/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_xsub_acq(dst, val),
            Release => intrinsics::atomic_xsub_rel(dst, val),
            AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
            Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
            SeqCst => intrinsics::atomic_xsub(dst, val),
        }
    }
}
+
/// Strong compare-exchange: if `*dst == old`, writes `new` and returns
/// `Ok(previous)`; otherwise returns `Err(previous)`. Dispatches the
/// `(success, failure)` ordering pair to the matching `atomic_cxchg_*`
/// intrinsic.
///
/// # Panics
///
/// Panics when `failure` is `Release`/`AcqRel`, or stronger than `success`
/// (only the pairs enumerated below have intrinsics).
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        // The intrinsics return `(previous_value, success_flag)`.
        match (success, failure) {
            (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
            _ => panic!("a failure ordering can't be stronger than a success ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
+
/// Weak compare-exchange: like [`atomic_compare_exchange`], but may fail
/// spuriously (return `Err` even when `*dst == old`), which permits better
/// code generation on some platforms. Dispatches the `(success, failure)`
/// ordering pair to the matching `atomic_cxchgweak_*` intrinsic.
///
/// # Panics
///
/// Same invalid ordering pairs as [`atomic_compare_exchange`].
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        // The intrinsics return `(previous_value, success_flag)`.
        match (success, failure) {
            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
            (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
            _ => panic!("a failure ordering can't be stronger than a success ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
+
/// Atomic bitwise AND of `*dst` with `val`, dispatching `order` to the
/// matching `atomic_and_*` intrinsic; the intrinsic's return value is
/// passed through.
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_and_acq(dst, val),
            Release => intrinsics::atomic_and_rel(dst, val),
            AcqRel => intrinsics::atomic_and_acqrel(dst, val),
            Relaxed => intrinsics::atomic_and_relaxed(dst, val),
            SeqCst => intrinsics::atomic_and(dst, val),
        }
    }
}
+
/// Atomic bitwise NAND of `*dst` with `val`, dispatching `order` to the
/// matching `atomic_nand_*` intrinsic; the intrinsic's return value is
/// passed through.
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_nand_acq(dst, val),
            Release => intrinsics::atomic_nand_rel(dst, val),
            AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
            Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
            SeqCst => intrinsics::atomic_nand(dst, val),
        }
    }
}
+
/// Atomic bitwise OR of `*dst` with `val`, dispatching `order` to the
/// matching `atomic_or_*` intrinsic; the intrinsic's return value is
/// passed through.
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_or_acq(dst, val),
            Release => intrinsics::atomic_or_rel(dst, val),
            AcqRel => intrinsics::atomic_or_acqrel(dst, val),
            Relaxed => intrinsics::atomic_or_relaxed(dst, val),
            SeqCst => intrinsics::atomic_or(dst, val),
        }
    }
}
+
/// Atomic bitwise XOR of `*dst` with `val`, dispatching `order` to the
/// matching `atomic_xor_*` intrinsic; the intrinsic's return value is
/// passed through.
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_xor_acq(dst, val),
            Release => intrinsics::atomic_xor_rel(dst, val),
            AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
            Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
            SeqCst => intrinsics::atomic_xor(dst, val),
        }
    }
}
+
/// Atomic signed maximum: updates `*dst` via the matching `atomic_max_*`
/// intrinsic and passes through its return value.
///
/// returns the max value (signed comparison)
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_max_acq(dst, val),
            Release => intrinsics::atomic_max_rel(dst, val),
            AcqRel => intrinsics::atomic_max_acqrel(dst, val),
            Relaxed => intrinsics::atomic_max_relaxed(dst, val),
            SeqCst => intrinsics::atomic_max(dst, val),
        }
    }
}
+
/// Atomic signed minimum: updates `*dst` via the matching `atomic_min_*`
/// intrinsic and passes through its return value.
///
/// returns the min value (signed comparison)
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_min_acq(dst, val),
            Release => intrinsics::atomic_min_rel(dst, val),
            AcqRel => intrinsics::atomic_min_acqrel(dst, val),
            Relaxed => intrinsics::atomic_min_relaxed(dst, val),
            SeqCst => intrinsics::atomic_min(dst, val),
        }
    }
}
+
/// Atomic unsigned maximum: updates `*dst` via the matching `atomic_umax_*`
/// intrinsic and passes through its return value.
///
/// returns the max value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_umax_acq(dst, val),
            Release => intrinsics::atomic_umax_rel(dst, val),
            AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
            Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
            SeqCst => intrinsics::atomic_umax(dst, val),
        }
    }
}
+
/// Atomic unsigned minimum: updates `*dst` via the matching `atomic_umin_*`
/// intrinsic and passes through its return value.
///
/// returns the min value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic = "8")]
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Acquire => intrinsics::atomic_umin_acq(dst, val),
            Release => intrinsics::atomic_umin_rel(dst, val),
            AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
            Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
            SeqCst => intrinsics::atomic_umin(dst, val),
        }
    }
}
+
/// An atomic fence.
///
/// Depending on the specified order, a fence prevents the compiler and CPU from
/// reordering certain types of memory operations around it.
/// That creates synchronizes-with relationships between it and atomic operations
/// or fences in other threads.
///
/// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there
/// exist operations X and Y, both operating on some atomic object 'M' such
/// that A is sequenced before X, Y is sequenced before B and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// ```text
/// Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// x.store(3, Relaxed); X ---------    |
///                                |    |
///                                |    |
///                                -------------> Y  if x.load(Relaxed) == 3 {
///                                     |-------> B      fence(Acquire);
///                                                      ...
///                                                  }
/// ```
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self.flag.compare_and_swap(false, true, Ordering::Relaxed) != false {}
///         // This fence synchronizes-with store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            // A relaxed fence would order nothing, so it is rejected.
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}
+
/// A compiler memory fence.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds
/// of memory re-ordering the compiler is allowed to do. Specifically, depending on
/// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads
/// or writes from before or after the call to the other side of the call to
/// `compiler_fence`. Note that it does **not** prevent the *hardware*
/// from doing such re-ordering. This is not a problem in a single-threaded
/// execution context, but when other threads may modify memory at the same
/// time, stronger synchronization primitives such as [`fence`] are required.
///
/// The re-ordering prevented by the different ordering semantics are:
///
/// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed.
/// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes.
/// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads.
/// - with [`AcqRel`], both of the above rules are enforced.
///
/// `compiler_fence` is generally only useful for preventing a thread from
/// racing *with itself*. That is, if a given thread is executing one piece
/// of code, and is then interrupted, and starts executing code elsewhere
/// (while still in the same thread, and conceptually still on the same
/// core). In traditional programs, this can only occur when a signal
/// handler is registered. In more low-level code, such situations can also
/// arise when handling interrupts, when implementing green threads with
/// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's
/// discussion of [memory barriers].
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without `compiler_fence`, the `assert_eq!` in following code
/// is *not* guaranteed to succeed, despite everything happening in a single thread.
/// To see why, remember that the compiler is free to swap the stores to
/// `IMPORTANT_VARIABLE` and `IS_READY` since they are both
/// `Ordering::Relaxed`. If it does, and the signal handler is invoked right
/// after `IS_READY` is updated, then the signal handler will see
/// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`.
/// Using a `compiler_fence` remedies this situation.
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize};
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
/// static IS_READY: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // prevent earlier writes from being moved beyond this point
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```
///
/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        // The "single-thread fence" intrinsics compile to no machine code;
        // they only constrain compiler reordering.
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        }
    }
}
+
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Snapshot the current value with a `SeqCst` load and defer to the
        // inner `bool`'s `Debug` impl.
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}
+
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Snapshot the current pointer with a `SeqCst` load and defer to the
        // raw pointer's `Debug` impl.
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}
+
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Snapshot the current pointer with a `SeqCst` load and format it as
        // an address via `fmt::Pointer`.
        fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}
--- /dev/null
//! Synchronization primitives.
//!
//! This module currently exposes a single submodule, [`atomic`], which
//! provides the atomic types (`AtomicBool`, `AtomicUsize`, …) together with
//! the `fence`/`compiler_fence` memory-ordering primitives.

#![stable(feature = "rust1", since = "1.0.0")]

pub mod atomic;
--- /dev/null
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+//! Types and Traits for working with asynchronous tasks.
+
+mod poll;
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::poll::Poll;
+
+mod wake;
+#[stable(feature = "futures_api", since = "1.36.0")]
+pub use self::wake::{Context, RawWaker, RawWakerVTable, Waker};
+
+mod ready;
+#[unstable(feature = "ready_macro", issue = "70922")]
+pub use ready::ready;
--- /dev/null
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::ops::Try;
+use crate::result::Result;
+
/// Indicates whether a value is available or if the current task has been
/// scheduled to receive a wakeup instead.
#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub enum Poll<T> {
    /// Represents that a value is immediately ready.
    // NOTE: `#[lang = "Ready"]` registers this variant as a compiler lang item.
    #[lang = "Ready"]
    #[stable(feature = "futures_api", since = "1.36.0")]
    Ready(#[stable(feature = "futures_api", since = "1.36.0")] T),

    /// Represents that a value is not ready yet.
    ///
    /// When a function returns `Pending`, the function *must* also
    /// ensure that the current task is scheduled to be awoken when
    /// progress can be made.
    // NOTE: `#[lang = "Pending"]` registers this variant as a compiler lang item.
    #[lang = "Pending"]
    #[stable(feature = "futures_api", since = "1.36.0")]
    Pending,
}
+
+impl<T> Poll<T> {
+ /// Changes the ready value of this `Poll` with the closure provided.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map<U, F>(self, f: F) -> Poll<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(t) => Poll::Ready(f(t)),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ /// Returns `true` if this is `Poll::Ready`
+ #[inline]
+ #[rustc_const_stable(feature = "const_poll", since = "1.49.0")]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn is_ready(&self) -> bool {
+ matches!(*self, Poll::Ready(_))
+ }
+
+ /// Returns `true` if this is `Poll::Pending`
+ #[inline]
+ #[rustc_const_stable(feature = "const_poll", since = "1.49.0")]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn is_pending(&self) -> bool {
+ !self.is_ready()
+ }
+}
+
+impl<T, E> Poll<Result<T, E>> {
+ /// Changes the success value of this `Poll` with the closure provided.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map_ok<U, F>(self, f: F) -> Poll<Result<U, E>>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(Ok(t)) => Poll::Ready(Ok(f(t))),
+ Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ /// Changes the error value of this `Poll` with the closure provided.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub fn map_err<U, F>(self, f: F) -> Poll<Result<T, U>>
+ where
+ F: FnOnce(E) -> U,
+ {
+ match self {
+ Poll::Ready(Ok(t)) => Poll::Ready(Ok(t)),
+ Poll::Ready(Err(e)) => Poll::Ready(Err(f(e))),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+impl<T, E> Poll<Option<Result<T, E>>> {
+ /// Changes the success value of this `Poll` with the closure provided.
+ #[unstable(feature = "poll_map", issue = "63514")]
+ pub fn map_ok<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>
+ where
+ F: FnOnce(T) -> U,
+ {
+ match self {
+ Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(f(t)))),
+ Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
+ Poll::Ready(None) => Poll::Ready(None),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+
+ /// Changes the error value of this `Poll` with the closure provided.
+ #[unstable(feature = "poll_map", issue = "63514")]
+ pub fn map_err<U, F>(self, f: F) -> Poll<Option<Result<T, U>>>
+ where
+ F: FnOnce(E) -> U,
+ {
+ match self {
+ Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(t))),
+ Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(f(e)))),
+ Poll::Ready(None) => Poll::Ready(None),
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<T> From<T> for Poll<T> {
+ /// Convert to a `Ready` variant.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use core::task::Poll;
+ /// assert_eq!(Poll::from(true), Poll::Ready(true));
+ /// ```
+ fn from(t: T) -> Poll<T> {
+ Poll::Ready(t)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<T, E> Try for Poll<Result<T, E>> {
+ type Ok = Poll<T>;
+ type Error = E;
+
+ #[inline]
+ fn into_result(self) -> Result<Self::Ok, Self::Error> {
+ match self {
+ Poll::Ready(Ok(x)) => Ok(Poll::Ready(x)),
+ Poll::Ready(Err(e)) => Err(e),
+ Poll::Pending => Ok(Poll::Pending),
+ }
+ }
+
+ #[inline]
+ fn from_error(e: Self::Error) -> Self {
+ Poll::Ready(Err(e))
+ }
+
+ #[inline]
+ fn from_ok(x: Self::Ok) -> Self {
+ x.map(Ok)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<T, E> Try for Poll<Option<Result<T, E>>> {
+ type Ok = Poll<Option<T>>;
+ type Error = E;
+
+ #[inline]
+ fn into_result(self) -> Result<Self::Ok, Self::Error> {
+ match self {
+ Poll::Ready(Some(Ok(x))) => Ok(Poll::Ready(Some(x))),
+ Poll::Ready(Some(Err(e))) => Err(e),
+ Poll::Ready(None) => Ok(Poll::Ready(None)),
+ Poll::Pending => Ok(Poll::Pending),
+ }
+ }
+
+ #[inline]
+ fn from_error(e: Self::Error) -> Self {
+ Poll::Ready(Some(Err(e)))
+ }
+
+ #[inline]
+ fn from_ok(x: Self::Ok) -> Self {
+ x.map(|x| x.map(Ok))
+ }
+}
--- /dev/null
/// Extracts the successful type of a `Poll<T>`.
///
/// This macro bakes in propagation of `Pending` signals by returning early.
///
/// # Examples
///
/// ```
/// #![feature(ready_macro)]
///
/// use core::task::{ready, Context, Poll};
/// use core::future::{self, Future};
/// use core::pin::Pin;
///
/// pub fn do_poll(cx: &mut Context<'_>) -> Poll<()> {
///     let mut fut = future::ready(42);
///     let fut = Pin::new(&mut fut);
///
///     let num = ready!(fut.poll(cx));
///     # drop(num);
///     // ... use num
///
///     Poll::Ready(())
/// }
/// ```
///
/// The `ready!` call expands to:
///
/// ```
/// # #![feature(ready_macro)]
/// #
/// # use core::task::{Context, Poll};
/// # use core::future::{self, Future};
/// # use core::pin::Pin;
/// #
/// # pub fn do_poll(cx: &mut Context<'_>) -> Poll<()> {
/// #     let mut fut = future::ready(42);
/// #     let fut = Pin::new(&mut fut);
/// #
/// let num = match fut.poll(cx) {
///     Poll::Ready(t) => t,
///     Poll::Pending => return Poll::Pending,
/// };
/// # drop(num);
/// # // ... use num
/// #
/// # Poll::Ready(())
/// # }
/// ```
// NOTE: a macros-2.0 `pub macro`, so it is importable with an ordinary
// `use` path (see the `pub use` in `task/mod.rs`); the `$crate::` paths keep
// the expansion independent of the caller's imports.
#[unstable(feature = "ready_macro", issue = "70922")]
#[rustc_macro_transparency = "semitransparent"]
pub macro ready($e:expr) {
    match $e {
        $crate::task::Poll::Ready(t) => t,
        $crate::task::Poll::Pending => {
            return $crate::task::Poll::Pending;
        }
    }
}
--- /dev/null
+#![stable(feature = "futures_api", since = "1.36.0")]
+
+use crate::fmt;
+use crate::marker::{PhantomData, Unpin};
+
/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`]
/// which provides customized wakeup behavior.
///
/// It consists of a data pointer and a [virtual function pointer table (vtable)][vtable]
/// that customizes the behavior of the `RawWaker`.
///
/// [vtable]: https://en.wikipedia.org/wiki/Virtual_method_table
#[derive(PartialEq, Debug)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct RawWaker {
    /// A data pointer, which can be used to store arbitrary data as required
    /// by the executor. This could be e.g. a type-erased pointer to an `Arc`
    /// that is associated with the task.
    /// The value of this field gets passed to all functions that are part of
    /// the vtable as the first parameter.
    data: *const (),
    /// Virtual function pointer table that customizes the behavior of this waker.
    vtable: &'static RawWakerVTable,
}
+
+impl RawWaker {
+ /// Creates a new `RawWaker` from the provided `data` pointer and `vtable`.
+ ///
+ /// The `data` pointer can be used to store arbitrary data as required
+ /// by the executor. This could be e.g. a type-erased pointer to an `Arc`
+ /// that is associated with the task.
+ /// The value of this pointer will get passed to all functions that are part
+ /// of the `vtable` as the first parameter.
+ ///
+ /// The `vtable` customizes the behavior of a `Waker` which gets created
+ /// from a `RawWaker`. For each operation on the `Waker`, the associated
+ /// function in the `vtable` of the underlying `RawWaker` will be called.
+ #[inline]
+ #[rustc_promotable]
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[rustc_const_stable(feature = "futures_api", since = "1.36.0")]
+ pub const fn new(data: *const (), vtable: &'static RawWakerVTable) -> RawWaker {
+ RawWaker { data, vtable }
+ }
+}
+
/// A virtual function pointer table (vtable) that specifies the behavior
/// of a [`RawWaker`].
///
/// The pointer passed to all functions inside the vtable is the `data` pointer
/// from the enclosing [`RawWaker`] object.
///
/// The functions inside this struct are only intended to be called on the `data`
/// pointer of a properly constructed [`RawWaker`] object from inside the
/// [`RawWaker`] implementation. Calling one of the contained functions using
/// any other `data` pointer will cause undefined behavior.
#[stable(feature = "futures_api", since = "1.36.0")]
#[derive(PartialEq, Copy, Clone, Debug)]
pub struct RawWakerVTable {
    /// This function will be called when the [`RawWaker`] gets cloned, e.g. when
    /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned.
    ///
    /// The implementation of this function must retain all resources that are
    /// required for this additional instance of a [`RawWaker`] and associated
    /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup
    /// of the same task that would have been awoken by the original [`RawWaker`].
    clone: unsafe fn(*const ()) -> RawWaker,

    /// This function will be called when `wake` is called on the [`Waker`].
    /// It must wake up the task associated with this [`RawWaker`].
    ///
    /// The implementation of this function must make sure to release any
    /// resources that are associated with this instance of a [`RawWaker`] and
    /// associated task. In other words, `wake` consumes the `data` pointer:
    /// `Waker::wake` forgets the waker instead of also running `drop`.
    wake: unsafe fn(*const ()),

    /// This function will be called when `wake_by_ref` is called on the [`Waker`].
    /// It must wake up the task associated with this [`RawWaker`].
    ///
    /// This function is similar to `wake`, but must not consume the provided data
    /// pointer.
    wake_by_ref: unsafe fn(*const ()),

    /// This function gets called when a [`RawWaker`] gets dropped.
    ///
    /// The implementation of this function must make sure to release any
    /// resources that are associated with this instance of a [`RawWaker`] and
    /// associated task.
    drop: unsafe fn(*const ()),
}
+
impl RawWakerVTable {
    /// Creates a new `RawWakerVTable` from the provided `clone`, `wake`,
    /// `wake_by_ref`, and `drop` functions.
    ///
    /// # `clone`
    ///
    /// This function will be called when the [`RawWaker`] gets cloned, e.g. when
    /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned.
    ///
    /// The implementation of this function must retain all resources that are
    /// required for this additional instance of a [`RawWaker`] and associated
    /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup
    /// of the same task that would have been awoken by the original [`RawWaker`].
    ///
    /// # `wake`
    ///
    /// This function will be called when `wake` is called on the [`Waker`].
    /// It must wake up the task associated with this [`RawWaker`].
    ///
    /// The implementation of this function must make sure to release any
    /// resources that are associated with this instance of a [`RawWaker`] and
    /// associated task.
    ///
    /// # `wake_by_ref`
    ///
    /// This function will be called when `wake_by_ref` is called on the [`Waker`].
    /// It must wake up the task associated with this [`RawWaker`].
    ///
    /// This function is similar to `wake`, but must not consume the provided data
    /// pointer.
    ///
    /// # `drop`
    ///
    /// This function gets called when a [`RawWaker`] gets dropped.
    ///
    /// The implementation of this function must make sure to release any
    /// resources that are associated with this instance of a [`RawWaker`] and
    /// associated task.
    #[rustc_promotable]
    #[stable(feature = "futures_api", since = "1.36.0")]
    #[rustc_const_stable(feature = "futures_api", since = "1.36.0")]
    // The attribute that exempts this const fn from the `const_fn_fn_ptr_basics`
    // gate is spelled differently on the bootstrap compiler vs. the in-tree one.
    #[cfg_attr(not(bootstrap), rustc_allow_const_fn_unstable(const_fn_fn_ptr_basics))]
    #[cfg_attr(bootstrap, allow_internal_unstable(const_fn_fn_ptr_basics))]
    pub const fn new(
        clone: unsafe fn(*const ()) -> RawWaker,
        wake: unsafe fn(*const ()),
        wake_by_ref: unsafe fn(*const ()),
        drop: unsafe fn(*const ()),
    ) -> Self {
        Self { clone, wake, wake_by_ref, drop }
    }
}
+
/// The `Context` of an asynchronous task.
///
/// Currently, `Context` only serves to provide access to a `&Waker`
/// which can be used to wake the current task.
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct Context<'a> {
    waker: &'a Waker,
    // Ensure we future-proof against variance changes by forcing
    // the lifetime to be invariant (argument-position lifetimes
    // are contravariant while return-position lifetimes are
    // covariant); using `'a` in both positions of the fn pointer
    // type therefore pins `'a` as invariant.
    _marker: PhantomData<fn(&'a ()) -> &'a ()>,
}
+
+impl<'a> Context<'a> {
+ /// Create a new `Context` from a `&Waker`.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[inline]
+ pub fn from_waker(waker: &'a Waker) -> Self {
+ Context { waker, _marker: PhantomData }
+ }
+
+ /// Returns a reference to the `Waker` for the current task.
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ #[inline]
+ pub fn waker(&self) -> &'a Waker {
+ &self.waker
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl fmt::Debug for Context<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Context").field("waker", &self.waker).finish()
+ }
+}
+
/// A `Waker` is a handle for waking up a task by notifying its executor that it
/// is ready to be run.
///
/// This handle encapsulates a [`RawWaker`] instance, which defines the
/// executor-specific wakeup behavior.
///
/// Implements [`Clone`], [`Send`], and [`Sync`].
// `repr(transparent)` guarantees `Waker` has exactly the same layout as the
// `RawWaker` it wraps.
#[repr(transparent)]
#[stable(feature = "futures_api", since = "1.36.0")]
pub struct Waker {
    waker: RawWaker,
}
+
#[stable(feature = "futures_api", since = "1.36.0")]
impl Unpin for Waker {}
// SAFETY(review): `Waker` can only be built through the `unsafe` fn
// `Waker::from_raw`, whose caller must uphold the `RawWaker`/`RawWakerVTable`
// contract; cross-thread usability of the `data` pointer and vtable functions
// is presumably part of that contract — confirm against the contract docs.
#[stable(feature = "futures_api", since = "1.36.0")]
unsafe impl Send for Waker {}
#[stable(feature = "futures_api", since = "1.36.0")]
unsafe impl Sync for Waker {}
+
impl Waker {
    /// Wake up the task associated with this `Waker`.
    #[inline]
    #[stable(feature = "futures_api", since = "1.36.0")]
    pub fn wake(self) {
        // The actual wakeup call is delegated through a virtual function call
        // to the implementation which is defined by the executor.
        // Copy the fields out *before* forgetting `self`, since `forget`
        // takes `self` by value.
        let wake = self.waker.vtable.wake;
        let data = self.waker.data;

        // Don't call `drop` -- the waker will be consumed by `wake`.
        crate::mem::forget(self);

        // SAFETY: This is safe because `Waker::from_raw` is the only way
        // to initialize `wake` and `data` requiring the user to acknowledge
        // that the contract of `RawWaker` is upheld.
        unsafe { (wake)(data) };
    }

    /// Wake up the task associated with this `Waker` without consuming the `Waker`.
    ///
    /// This is similar to `wake`, but may be slightly less efficient in the case
    /// where an owned `Waker` is available. This method should be preferred to
    /// calling `waker.clone().wake()`.
    #[inline]
    #[stable(feature = "futures_api", since = "1.36.0")]
    pub fn wake_by_ref(&self) {
        // The actual wakeup call is delegated through a virtual function call
        // to the implementation which is defined by the executor.

        // SAFETY: see `wake`
        unsafe { (self.waker.vtable.wake_by_ref)(self.waker.data) }
    }

    /// Returns `true` if this `Waker` and another `Waker` have awoken the same task.
    ///
    /// This function works on a best-effort basis, and may return false even
    /// when the `Waker`s would awaken the same task. However, if this function
    /// returns `true`, it is guaranteed that the `Waker`s will awaken the same task.
    ///
    /// This function is primarily used for optimization purposes.
    #[inline]
    #[stable(feature = "futures_api", since = "1.36.0")]
    pub fn will_wake(&self, other: &Waker) -> bool {
        // `RawWaker`'s derived `PartialEq` compares both the data pointer and
        // the vtable's function pointers.
        self.waker == other.waker
    }

    /// Creates a new `Waker` from [`RawWaker`].
    ///
    /// The behavior of the returned `Waker` is undefined if the contract defined
    /// in [`RawWaker`]'s and [`RawWakerVTable`]'s documentation is not upheld.
    /// Therefore this method is unsafe.
    #[inline]
    #[stable(feature = "futures_api", since = "1.36.0")]
    pub unsafe fn from_raw(waker: RawWaker) -> Waker {
        Waker { waker }
    }
}
+
#[stable(feature = "futures_api", since = "1.36.0")]
impl Clone for Waker {
    #[inline]
    fn clone(&self) -> Self {
        // Delegates to the vtable's `clone` function, which must produce a
        // `RawWaker` that wakes the same task as the original.
        Waker {
            // SAFETY: This is safe because `Waker::from_raw` is the only way
            // to initialize `clone` and `data` requiring the user to acknowledge
            // that the contract of [`RawWaker`] is upheld.
            waker: unsafe { (self.waker.vtable.clone)(self.waker.data) },
        }
    }
}
+
#[stable(feature = "futures_api", since = "1.36.0")]
impl Drop for Waker {
    #[inline]
    fn drop(&mut self) {
        // Delegates cleanup to the vtable's `drop` function.
        // SAFETY: This is safe because `Waker::from_raw` is the only way
        // to initialize `drop` and `data` requiring the user to acknowledge
        // that the contract of `RawWaker` is upheld.
        unsafe { (self.waker.vtable.drop)(self.waker.data) }
    }
}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl fmt::Debug for Waker {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let vtable_ptr = self.waker.vtable as *const RawWakerVTable;
+ f.debug_struct("Waker")
+ .field("data", &self.waker.data)
+ .field("vtable", &vtable_ptr)
+ .finish()
+ }
+}
--- /dev/null
+#![stable(feature = "duration_core", since = "1.25.0")]
+
+//! Temporal quantification.
+//!
+//! Example:
+//!
+//! ```
+//! use std::time::Duration;
+//!
+//! let five_seconds = Duration::new(5, 0);
+//! // both declarations are equivalent
+//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5));
+//! ```
+
+use crate::fmt;
+use crate::iter::Sum;
+use crate::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
+
// Conversion factors between seconds and the fixed-point subsecond units.
const NANOS_PER_SEC: u32 = 1_000_000_000;
const NANOS_PER_MILLI: u32 = 1_000_000;
const NANOS_PER_MICRO: u32 = 1_000;
const MILLIS_PER_SEC: u64 = 1_000;
const MICROS_PER_SEC: u64 = 1_000_000;
+
/// A `Duration` type to represent a span of time, typically used for system
/// timeouts.
///
/// Each `Duration` is composed of a whole number of seconds and a fractional part
/// represented in nanoseconds. If the underlying system does not support
/// nanosecond-level precision, APIs binding a system timeout will typically round up
/// the number of nanoseconds.
///
/// [`Duration`]s implement many common traits, including [`Add`], [`Sub`], and other
/// [`ops`] traits. It implements [`Default`] by returning a zero-length `Duration`.
///
/// [`ops`]: crate::ops
///
/// # Examples
///
/// ```
/// use std::time::Duration;
///
/// let five_seconds = Duration::new(5, 0);
/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5);
///
/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5);
/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5);
///
/// let ten_millis = Duration::from_millis(10);
/// ```
#[stable(feature = "duration", since = "1.3.0")]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Duration {
    secs: u64, // Whole-second component of the span.
    nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC
}
+
+impl Duration {
    /// The duration of one second.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_constants)]
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::SECOND, Duration::from_secs(1));
    /// ```
    #[unstable(feature = "duration_constants", issue = "57391")]
    pub const SECOND: Duration = Duration::from_secs(1);

    /// The duration of one millisecond.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_constants)]
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::MILLISECOND, Duration::from_millis(1));
    /// ```
    #[unstable(feature = "duration_constants", issue = "57391")]
    pub const MILLISECOND: Duration = Duration::from_millis(1);

    /// The duration of one microsecond.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_constants)]
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::MICROSECOND, Duration::from_micros(1));
    /// ```
    #[unstable(feature = "duration_constants", issue = "57391")]
    pub const MICROSECOND: Duration = Duration::from_micros(1);

    /// The duration of one nanosecond.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_constants)]
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::NANOSECOND, Duration::from_nanos(1));
    /// ```
    #[unstable(feature = "duration_constants", issue = "57391")]
    pub const NANOSECOND: Duration = Duration::from_nanos(1);

    /// A duration of zero time.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_zero)]
    /// use std::time::Duration;
    ///
    /// let duration = Duration::ZERO;
    /// assert!(duration.is_zero());
    /// assert_eq!(duration.as_nanos(), 0);
    /// ```
    #[unstable(feature = "duration_zero", issue = "73544")]
    pub const ZERO: Duration = Duration::from_nanos(0);

    /// The maximum duration.
    ///
    /// It is roughly equal to a duration of 584,942,417,355 years.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(duration_constants)]
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::MAX, Duration::new(u64::MAX, 1_000_000_000 - 1));
    /// ```
    #[unstable(feature = "duration_constants", issue = "57391")]
    // `u64::MAX` whole seconds plus the largest representable subsecond count.
    pub const MAX: Duration = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
+
    /// Creates a new `Duration` from the specified number of whole seconds and
    /// additional nanoseconds.
    ///
    /// If the number of nanoseconds is greater than or equal to 1 billion
    /// (the number of nanoseconds in a second), then it will carry over into
    /// the seconds provided.
    ///
    /// # Panics
    ///
    /// This constructor will panic if the carry from the nanoseconds overflows
    /// the seconds counter.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let five_seconds = Duration::new(5, 0);
    /// ```
    #[stable(feature = "duration", since = "1.3.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn new(secs: u64, nanos: u32) -> Duration {
        // Fold whole seconds out of `nanos`. The carry `nanos / NANOS_PER_SEC`
        // is at most 4 (u32::MAX / 1e9), so `checked_add` is the only way this
        // can fail.
        let secs = match secs.checked_add((nanos / NANOS_PER_SEC) as u64) {
            Some(secs) => secs,
            None => panic!("overflow in Duration::new"),
        };
        let nanos = nanos % NANOS_PER_SEC;
        Duration { secs, nanos }
    }
+
+ /// Creates a new `Duration` from the specified number of whole seconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_secs(5);
+ ///
+ /// assert_eq!(5, duration.as_secs());
+ /// assert_eq!(0, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_secs(secs: u64) -> Duration {
+ Duration { secs, nanos: 0 }
+ }
+
+ /// Creates a new `Duration` from the specified number of milliseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_millis(2569);
+ ///
+ /// assert_eq!(2, duration.as_secs());
+ /// assert_eq!(569_000_000, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration", since = "1.3.0")]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_millis(millis: u64) -> Duration {
+ Duration {
+ secs: millis / MILLIS_PER_SEC,
+ nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of microseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_micros(1_000_002);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(2000, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration_from_micros", since = "1.27.0")]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_micros(micros: u64) -> Duration {
+ Duration {
+ secs: micros / MICROS_PER_SEC,
+ nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO,
+ }
+ }
+
+ /// Creates a new `Duration` from the specified number of nanoseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let duration = Duration::from_nanos(1_000_000_123);
+ ///
+ /// assert_eq!(1, duration.as_secs());
+ /// assert_eq!(123, duration.subsec_nanos());
+ /// ```
+ #[stable(feature = "duration_extras", since = "1.27.0")]
+ #[inline]
+ #[rustc_const_stable(feature = "duration_consts", since = "1.32.0")]
+ pub const fn from_nanos(nanos: u64) -> Duration {
+ Duration {
+ secs: nanos / (NANOS_PER_SEC as u64),
+ nanos: (nanos % (NANOS_PER_SEC as u64)) as u32,
+ }
+ }
+
+ /// Returns true if this `Duration` spans no time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_zero)]
+ /// use std::time::Duration;
+ ///
+ /// assert!(Duration::ZERO.is_zero());
+ /// assert!(Duration::new(0, 0).is_zero());
+ /// assert!(Duration::from_nanos(0).is_zero());
+ /// assert!(Duration::from_secs(0).is_zero());
+ ///
+ /// assert!(!Duration::new(1, 1).is_zero());
+ /// assert!(!Duration::from_nanos(1).is_zero());
+ /// assert!(!Duration::from_secs(1).is_zero());
+ /// ```
+ #[unstable(feature = "duration_zero", issue = "73544")]
+ #[inline]
+ pub const fn is_zero(&self) -> bool {
+ self.secs == 0 && self.nanos == 0
+ }
+
    /// Returns the number of _whole_ seconds contained by this `Duration`.
    ///
    /// The returned value does not include the fractional (nanosecond) part of the
    /// duration, which can be obtained using [`subsec_nanos`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::new(5, 730023852);
    /// assert_eq!(duration.as_secs(), 5);
    /// ```
    ///
    /// To determine the total number of seconds represented by the `Duration`,
    /// use `as_secs` in combination with [`subsec_nanos`]:
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::new(5, 730023852);
    ///
    /// assert_eq!(5.730023852,
    ///            duration.as_secs() as f64
    ///            + duration.subsec_nanos() as f64 * 1e-9);
    /// ```
    ///
    /// [`subsec_nanos`]: Duration::subsec_nanos
    #[stable(feature = "duration", since = "1.3.0")]
    #[rustc_const_stable(feature = "duration", since = "1.32.0")]
    #[inline]
    pub const fn as_secs(&self) -> u64 {
        // `secs` already stores only whole seconds.
        self.secs
    }
+
    /// Returns the fractional part of this `Duration`, in whole milliseconds.
    ///
    /// This method does **not** return the length of the duration when
    /// represented by milliseconds. The returned number always represents a
    /// fractional portion of a second (i.e., it is less than one thousand).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::from_millis(5432);
    /// assert_eq!(duration.as_secs(), 5);
    /// assert_eq!(duration.subsec_millis(), 432);
    /// ```
    #[stable(feature = "duration_extras", since = "1.27.0")]
    #[rustc_const_stable(feature = "duration_extras", since = "1.32.0")]
    #[inline]
    pub const fn subsec_millis(&self) -> u32 {
        // 1 ms == 1_000_000 ns.
        self.nanos / NANOS_PER_MILLI
    }
+
    /// Returns the fractional part of this `Duration`, in whole microseconds.
    ///
    /// This method does **not** return the length of the duration when
    /// represented by microseconds. The returned number always represents a
    /// fractional portion of a second (i.e., it is less than one million).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::from_micros(1_234_567);
    /// assert_eq!(duration.as_secs(), 1);
    /// assert_eq!(duration.subsec_micros(), 234_567);
    /// ```
    #[stable(feature = "duration_extras", since = "1.27.0")]
    #[rustc_const_stable(feature = "duration_extras", since = "1.32.0")]
    #[inline]
    pub const fn subsec_micros(&self) -> u32 {
        // 1 µs == 1_000 ns.
        self.nanos / NANOS_PER_MICRO
    }
+
    /// Returns the fractional part of this `Duration`, in nanoseconds.
    ///
    /// This method does **not** return the length of the duration when
    /// represented by nanoseconds. The returned number always represents a
    /// fractional portion of a second (i.e., it is less than one billion).
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::from_millis(5010);
    /// assert_eq!(duration.as_secs(), 5);
    /// assert_eq!(duration.subsec_nanos(), 10_000_000);
    /// ```
    #[stable(feature = "duration", since = "1.3.0")]
    #[rustc_const_stable(feature = "duration", since = "1.32.0")]
    #[inline]
    pub const fn subsec_nanos(&self) -> u32 {
        // The field invariant guarantees `nanos < NANOS_PER_SEC`.
        self.nanos
    }
+
    /// Returns the total number of whole milliseconds contained by this `Duration`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::new(5, 730023852);
    /// assert_eq!(duration.as_millis(), 5730);
    /// ```
    #[stable(feature = "duration_as_u128", since = "1.33.0")]
    #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
    #[inline]
    pub const fn as_millis(&self) -> u128 {
        // Widening to u128 keeps the seconds-to-milliseconds scaling overflow-free.
        self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128
    }
+
    /// Returns the total number of whole microseconds contained by this `Duration`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::new(5, 730023852);
    /// assert_eq!(duration.as_micros(), 5730023);
    /// ```
    #[stable(feature = "duration_as_u128", since = "1.33.0")]
    #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
    #[inline]
    pub const fn as_micros(&self) -> u128 {
        // Widening to u128 keeps the seconds-to-microseconds scaling overflow-free.
        self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128
    }
+
    /// Returns the total number of nanoseconds contained by this `Duration`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// let duration = Duration::new(5, 730023852);
    /// assert_eq!(duration.as_nanos(), 5730023852);
    /// ```
    #[stable(feature = "duration_as_u128", since = "1.33.0")]
    #[rustc_const_stable(feature = "duration_as_u128", since = "1.33.0")]
    #[inline]
    pub const fn as_nanos(&self) -> u128 {
        // Widening to u128 keeps the seconds-to-nanoseconds scaling overflow-free.
        self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128
    }
+
    /// Checked `Duration` addition. Computes `self + other`, returning [`None`]
    /// if overflow occurred.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1)));
    /// assert_eq!(Duration::new(1, 0).checked_add(Duration::new(u64::MAX, 0)), None);
    /// ```
    #[stable(feature = "duration_checked_ops", since = "1.16.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn checked_add(self, rhs: Duration) -> Option<Duration> {
        if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
            // Both `nanos` fields are < NANOS_PER_SEC, so their sum is
            // < 2 * NANOS_PER_SEC (no u32 overflow) and at most one carry
            // into `secs` is ever needed.
            let mut nanos = self.nanos + rhs.nanos;
            if nanos >= NANOS_PER_SEC {
                nanos -= NANOS_PER_SEC;
                if let Some(new_secs) = secs.checked_add(1) {
                    secs = new_secs;
                } else {
                    return None;
                }
            }
            debug_assert!(nanos < NANOS_PER_SEC);
            Some(Duration { secs, nanos })
        } else {
            None
        }
    }
+
+ /// Saturating `Duration` addition. Computes `self + other`, returning [`Duration::MAX`]
+ /// if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_saturating_ops)]
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 0).saturating_add(Duration::new(0, 1)), Duration::new(0, 1));
+ /// assert_eq!(Duration::new(1, 0).saturating_add(Duration::new(u64::MAX, 0)), Duration::MAX);
+ /// ```
+ #[unstable(feature = "duration_saturating_ops", issue = "76416")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn saturating_add(self, rhs: Duration) -> Duration {
+ match self.checked_add(rhs) {
+ Some(res) => res,
+ None => Duration::MAX,
+ }
+ }
+
+ /// Checked `Duration` subtraction. Computes `self - other`, returning [`None`]
+ /// if the result would be negative or if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 1).checked_sub(Duration::new(0, 0)), Some(Duration::new(0, 1)));
+ /// assert_eq!(Duration::new(0, 0).checked_sub(Duration::new(0, 1)), None);
+ /// ```
+ #[stable(feature = "duration_checked_ops", since = "1.16.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn checked_sub(self, rhs: Duration) -> Option<Duration> {
+ if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
+ let nanos = if self.nanos >= rhs.nanos {
+ self.nanos - rhs.nanos
+ } else {
+ if let Some(sub_secs) = secs.checked_sub(1) {
+ secs = sub_secs;
+ self.nanos + NANOS_PER_SEC - rhs.nanos
+ } else {
+ return None;
+ }
+ };
+ debug_assert!(nanos < NANOS_PER_SEC);
+ Some(Duration { secs, nanos })
+ } else {
+ None
+ }
+ }
+
+ /// Saturating `Duration` subtraction. Computes `self - other`, returning [`Duration::ZERO`]
+ /// if the result would be negative or if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_saturating_ops)]
+ /// #![feature(duration_zero)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 1).saturating_sub(Duration::new(0, 0)), Duration::new(0, 1));
+ /// assert_eq!(Duration::new(0, 0).saturating_sub(Duration::new(0, 1)), Duration::ZERO);
+ /// ```
+ #[unstable(feature = "duration_saturating_ops", issue = "76416")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn saturating_sub(self, rhs: Duration) -> Duration {
+ match self.checked_sub(rhs) {
+ Some(res) => res,
+ None => Duration::ZERO,
+ }
+ }
+
    /// Checked `Duration` multiplication. Computes `self * other`, returning
    /// [`None`] if overflow occurred.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::new(0, 500_000_001).checked_mul(2), Some(Duration::new(1, 2)));
    /// assert_eq!(Duration::new(u64::MAX - 1, 0).checked_mul(2), None);
    /// ```
    #[stable(feature = "duration_checked_ops", since = "1.16.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn checked_mul(self, rhs: u32) -> Option<Duration> {
        // Multiply nanoseconds as u64, because it cannot overflow that way.
        // (`self.nanos < 10^9 < 2^30` and `rhs <= u32::MAX < 2^32`, so the
        // product is < 2^62.)
        let total_nanos = self.nanos as u64 * rhs as u64;
        let extra_secs = total_nanos / (NANOS_PER_SEC as u64);
        let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32;
        if let Some(s) = self.secs.checked_mul(rhs as u64) {
            if let Some(secs) = s.checked_add(extra_secs) {
                debug_assert!(nanos < NANOS_PER_SEC);
                return Some(Duration { secs, nanos });
            }
        }
        None
    }
+
+ /// Saturating `Duration` multiplication. Computes `self * other`, returning
+ /// [`Duration::MAX`] if overflow occurred.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(duration_saturating_ops)]
+ /// #![feature(duration_constants)]
+ /// use std::time::Duration;
+ ///
+ /// assert_eq!(Duration::new(0, 500_000_001).saturating_mul(2), Duration::new(1, 2));
+ /// assert_eq!(Duration::new(u64::MAX - 1, 0).saturating_mul(2), Duration::MAX);
+ /// ```
+ #[unstable(feature = "duration_saturating_ops", issue = "76416")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn saturating_mul(self, rhs: u32) -> Duration {
+ match self.checked_mul(rhs) {
+ Some(res) => res,
+ None => Duration::MAX,
+ }
+ }
+
    /// Checked `Duration` division. Computes `self / other`, returning [`None`]
    /// if `other == 0`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0)));
    /// assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000)));
    /// assert_eq!(Duration::new(2, 0).checked_div(0), None);
    /// ```
    #[stable(feature = "duration_checked_ops", since = "1.16.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn checked_div(self, rhs: u32) -> Option<Duration> {
        if rhs != 0 {
            let secs = self.secs / (rhs as u64);
            // `carry` is the remainder of the seconds division (< rhs); spread
            // it into the nanoseconds so sub-second precision is preserved.
            let carry = self.secs - secs * (rhs as u64);
            let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64);
            let nanos = self.nanos / rhs + (extra_nanos as u32);
            debug_assert!(nanos < NANOS_PER_SEC);
            Some(Duration { secs, nanos })
        } else {
            None
        }
    }
+
    /// Returns the number of seconds contained by this `Duration` as `f64`.
    ///
    /// The returned value does include the fractional (nanosecond) part of the duration.
    ///
    /// # Examples
    /// ```
    /// use std::time::Duration;
    ///
    /// let dur = Duration::new(2, 700_000_000);
    /// assert_eq!(dur.as_secs_f64(), 2.7);
    /// ```
    #[stable(feature = "duration_float", since = "1.38.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn as_secs_f64(&self) -> f64 {
        // The `secs as f64` cast can round for very large second counts.
        (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64)
    }
+
    /// Returns the number of seconds contained by this `Duration` as `f32`.
    ///
    /// The returned value does include the fractional (nanosecond) part of the duration.
    ///
    /// # Examples
    /// ```
    /// use std::time::Duration;
    ///
    /// let dur = Duration::new(2, 700_000_000);
    /// assert_eq!(dur.as_secs_f32(), 2.7);
    /// ```
    #[stable(feature = "duration_float", since = "1.38.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn as_secs_f32(&self) -> f32 {
        // Both casts can round: f32 carries far fewer significant bits than
        // either field's full range.
        (self.secs as f32) + (self.nanos as f32) / (NANOS_PER_SEC as f32)
    }
+
    /// Creates a new `Duration` from the specified number of seconds represented
    /// as `f64`.
    ///
    /// # Panics
    /// This constructor will panic if `secs` is not finite, negative or overflows `Duration`.
    ///
    /// # Examples
    /// ```
    /// use std::time::Duration;
    ///
    /// let dur = Duration::from_secs_f64(2.7);
    /// assert_eq!(dur, Duration::new(2, 700_000_000));
    /// ```
    #[stable(feature = "duration_float", since = "1.38.0")]
    #[inline]
    #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
    pub const fn from_secs_f64(secs: f64) -> Duration {
        // (u64::MAX + 1) seconds expressed in nanoseconds: any value at or
        // above this bound has a seconds component that no longer fits in u64.
        // NOTE(review): the `as f64` cast rounds this bound; `>=` keeps the
        // check conservative — confirm.
        const MAX_NANOS_F64: f64 = ((u64::MAX as u128 + 1) * (NANOS_PER_SEC as u128)) as f64;
        let nanos = secs * (NANOS_PER_SEC as f64);
        if !nanos.is_finite() {
            panic!("got non-finite value when converting float to duration");
        }
        if nanos >= MAX_NANOS_F64 {
            panic!("overflow when converting float to duration");
        }
        if nanos < 0.0 {
            panic!("underflow when converting float to duration");
        }
        let nanos = nanos as u128;
        Duration {
            secs: (nanos / (NANOS_PER_SEC as u128)) as u64,
            nanos: (nanos % (NANOS_PER_SEC as u128)) as u32,
        }
    }
+
+ /// Creates a new `Duration` from the specified number of seconds represented
+ /// as `f32`.
+ ///
+ /// # Panics
+ /// This constructor will panic if `secs` is not finite, negative or overflows `Duration`.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::from_secs_f32(2.7);
+ /// assert_eq!(dur, Duration::new(2, 700_000_000));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn from_secs_f32(secs: f32) -> Duration {
+ // Same scheme as `from_secs_f64`: an exclusive upper bound, compared with `>=`
+ // because the u128->f32 cast rounds.
+ const MAX_NANOS_F32: f32 = ((u64::MAX as u128 + 1) * (NANOS_PER_SEC as u128)) as f32;
+ let nanos = secs * (NANOS_PER_SEC as f32);
+ if !nanos.is_finite() {
+ panic!("got non-finite value when converting float to duration");
+ }
+ if nanos >= MAX_NANOS_F32 {
+ panic!("overflow when converting float to duration");
+ }
+ if nanos < 0.0 {
+ panic!("underflow when converting float to duration");
+ }
+ // Truncates any sub-nanosecond fraction (rounds toward zero).
+ let nanos = nanos as u128;
+ Duration {
+ secs: (nanos / (NANOS_PER_SEC as u128)) as u64,
+ nanos: (nanos % (NANOS_PER_SEC as u128)) as u32,
+ }
+ }
+
+ /// Multiplies `Duration` by `f64`.
+ ///
+ /// # Panics
+ /// This method will panic if result is not finite, negative or overflows `Duration`.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.mul_f64(3.14), Duration::new(8, 478_000_000));
+ /// assert_eq!(dur.mul_f64(3.14e5), Duration::new(847_800, 0));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn mul_f64(self, rhs: f64) -> Duration {
+ // Round-trips through f64 seconds, so precision and panics are those of
+ // `as_secs_f64` / `from_secs_f64`.
+ Duration::from_secs_f64(rhs * self.as_secs_f64())
+ }
+
+ /// Multiplies `Duration` by `f32`.
+ ///
+ /// # Panics
+ /// This method will panic if result is not finite, negative or overflows `Duration`.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// // note that due to rounding errors result is slightly different
+ /// // from 8.478 and 847800.0
+ /// assert_eq!(dur.mul_f32(3.14), Duration::new(8, 478_000_640));
+ /// assert_eq!(dur.mul_f32(3.14e5), Duration::new(847799, 969_120_256));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn mul_f32(self, rhs: f32) -> Duration {
+ // Round-trips through f32 seconds; see the doc examples for the resulting
+ // rounding error.
+ Duration::from_secs_f32(rhs * self.as_secs_f32())
+ }
+
+ /// Divide `Duration` by `f64`.
+ ///
+ /// # Panics
+ /// This method will panic if result is not finite, negative or overflows `Duration`.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// assert_eq!(dur.div_f64(3.14), Duration::new(0, 859_872_611));
+ /// // note that truncation is used, not rounding
+ /// assert_eq!(dur.div_f64(3.14e5), Duration::new(0, 8_598));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn div_f64(self, rhs: f64) -> Duration {
+ // Round-trips through f64 seconds; `from_secs_f64` truncates the
+ // sub-nanosecond remainder.
+ Duration::from_secs_f64(self.as_secs_f64() / rhs)
+ }
+
+ /// Divide `Duration` by `f32`.
+ ///
+ /// # Panics
+ /// This method will panic if result is not finite, negative or overflows `Duration`.
+ ///
+ /// # Examples
+ /// ```
+ /// use std::time::Duration;
+ ///
+ /// let dur = Duration::new(2, 700_000_000);
+ /// // note that due to rounding errors result is slightly
+ /// // different from 0.859_872_611
+ /// assert_eq!(dur.div_f32(3.14), Duration::new(0, 859_872_576));
+ /// // note that truncation is used, not rounding
+ /// assert_eq!(dur.div_f32(3.14e5), Duration::new(0, 8_598));
+ /// ```
+ #[stable(feature = "duration_float", since = "1.38.0")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn div_f32(self, rhs: f32) -> Duration {
+ // Round-trips through f32 seconds; see the doc examples for the resulting
+ // rounding error.
+ Duration::from_secs_f32(self.as_secs_f32() / rhs)
+ }
+
+ /// Divide `Duration` by `Duration` and return `f64`.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(div_duration)]
+ /// use std::time::Duration;
+ ///
+ /// let dur1 = Duration::new(2, 700_000_000);
+ /// let dur2 = Duration::new(5, 400_000_000);
+ /// assert_eq!(dur1.div_duration_f64(dur2), 0.5);
+ /// ```
+ #[unstable(feature = "div_duration", issue = "63139")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn div_duration_f64(self, rhs: Duration) -> f64 {
+ // No panic on a zero `rhs`: float division yields infinity/NaN instead.
+ self.as_secs_f64() / rhs.as_secs_f64()
+ }
+
+ /// Divide `Duration` by `Duration` and return `f32`.
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(div_duration)]
+ /// use std::time::Duration;
+ ///
+ /// let dur1 = Duration::new(2, 700_000_000);
+ /// let dur2 = Duration::new(5, 400_000_000);
+ /// assert_eq!(dur1.div_duration_f32(dur2), 0.5);
+ /// ```
+ #[unstable(feature = "div_duration", issue = "63139")]
+ #[inline]
+ #[rustc_const_unstable(feature = "duration_consts_2", issue = "72440")]
+ pub const fn div_duration_f32(self, rhs: Duration) -> f32 {
+ // No panic on a zero `rhs`: float division yields infinity/NaN instead.
+ self.as_secs_f32() / rhs.as_secs_f32()
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Add for Duration {
+ type Output = Duration;
+
+ // All arithmetic operators below delegate to the corresponding `checked_*`
+ // method and panic (via `expect`) on overflow / division by zero.
+ fn add(self, rhs: Duration) -> Duration {
+ self.checked_add(rhs).expect("overflow when adding durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl AddAssign for Duration {
+ fn add_assign(&mut self, rhs: Duration) {
+ *self = *self + rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Sub for Duration {
+ type Output = Duration;
+
+ fn sub(self, rhs: Duration) -> Duration {
+ self.checked_sub(rhs).expect("overflow when subtracting durations")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl SubAssign for Duration {
+ fn sub_assign(&mut self, rhs: Duration) {
+ *self = *self - rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Mul<u32> for Duration {
+ type Output = Duration;
+
+ fn mul(self, rhs: u32) -> Duration {
+ self.checked_mul(rhs).expect("overflow when multiplying duration by scalar")
+ }
+}
+
+#[stable(feature = "symmetric_u32_duration_mul", since = "1.31.0")]
+impl Mul<Duration> for u32 {
+ type Output = Duration;
+
+ // `u32 * Duration` simply delegates to `Duration * u32` for symmetry.
+ fn mul(self, rhs: Duration) -> Duration {
+ rhs * self
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl MulAssign<u32> for Duration {
+ fn mul_assign(&mut self, rhs: u32) {
+ *self = *self * rhs;
+ }
+}
+
+#[stable(feature = "duration", since = "1.3.0")]
+impl Div<u32> for Duration {
+ type Output = Duration;
+
+ fn div(self, rhs: u32) -> Duration {
+ self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
+ }
+}
+
+#[stable(feature = "time_augmented_assignment", since = "1.9.0")]
+impl DivAssign<u32> for Duration {
+ fn div_assign(&mut self, rhs: u32) {
+ *self = *self / rhs;
+ }
+}
+
+// Shared body for both `Sum` impls below. Accumulates seconds and nanoseconds
+// separately; nanoseconds are only folded into seconds lazily (on u64 overflow
+// of the nanosecond accumulator, and once at the end), which keeps the loop to
+// a single checked add per field. Panics on u64 overflow of the total.
+macro_rules! sum_durations {
+ ($iter:expr) => {{
+ let mut total_secs: u64 = 0;
+ let mut total_nanos: u64 = 0;
+
+ for entry in $iter {
+ total_secs =
+ total_secs.checked_add(entry.secs).expect("overflow in iter::sum over durations");
+ total_nanos = match total_nanos.checked_add(entry.nanos as u64) {
+ Some(n) => n,
+ None => {
+ // Nanosecond accumulator overflowed u64: spill the whole-second
+ // part into `total_secs` and keep only the remainder plus the
+ // current entry (both < NANOS_PER_SEC, so this cannot overflow).
+ total_secs = total_secs
+ .checked_add(total_nanos / NANOS_PER_SEC as u64)
+ .expect("overflow in iter::sum over durations")
+ (total_nanos % NANOS_PER_SEC as u64) + entry.nanos as u64
+ }
+ };
+ }
+ // Final normalization so the resulting `nanos` field is < NANOS_PER_SEC.
+ total_secs = total_secs
+ .checked_add(total_nanos / NANOS_PER_SEC as u64)
+ .expect("overflow in iter::sum over durations");
+ total_nanos = total_nanos % NANOS_PER_SEC as u64;
+ Duration { secs: total_secs, nanos: total_nanos as u32 }
+ }};
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl Sum for Duration {
+ // By-value summation; see `sum_durations!` for the accumulation strategy.
+ fn sum<I: Iterator<Item = Duration>>(iter: I) -> Duration {
+ sum_durations!(iter)
+ }
+}
+
+#[stable(feature = "duration_sum", since = "1.16.0")]
+impl<'a> Sum<&'a Duration> for Duration {
+ // By-reference summation; works because the macro only reads `.secs`/`.nanos`,
+ // which auto-deref through the reference.
+ fn sum<I: Iterator<Item = &'a Duration>>(iter: I) -> Duration {
+ sum_durations!(iter)
+ }
+}
+
+#[stable(feature = "duration_debug_impl", since = "1.27.0")]
+impl fmt::Debug for Duration {
+ // Formats the duration with an auto-selected unit: seconds if >= 1s,
+ // otherwise ms, then µs, then ns, with the fractional part rendered in
+ // decimal notation. Honors `{:+}` and `{:.N}` formatter flags.
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Formats a floating point number in decimal notation.
+ ///
+ /// The number is given as the `integer_part` and a fractional part.
+ /// The value of the fractional part is `fractional_part / divisor`. So
+ /// `integer_part` = 3, `fractional_part` = 12 and `divisor` = 100
+ /// represents the number `3.012`. Trailing zeros are omitted.
+ ///
+ /// `divisor` must not be above 100_000_000. It also should be a power
+ /// of 10, everything else doesn't make sense. `fractional_part` has
+ /// to be less than `10 * divisor`!
+ fn fmt_decimal(
+ f: &mut fmt::Formatter<'_>,
+ mut integer_part: u64,
+ mut fractional_part: u32,
+ mut divisor: u32,
+ ) -> fmt::Result {
+ // Encode the fractional part into a temporary buffer. The buffer
+ // only need to hold 9 elements, because `fractional_part` has to
+ // be smaller than 10^9. The buffer is prefilled with '0' digits
+ // to simplify the code below.
+ let mut buf = [b'0'; 9];
+
+ // The next digit is written at this position
+ let mut pos = 0;
+
+ // We keep writing digits into the buffer while there are non-zero
+ // digits left and we haven't written enough digits yet.
+ while fractional_part > 0 && pos < f.precision().unwrap_or(9) {
+ // Write new digit into the buffer
+ buf[pos] = b'0' + (fractional_part / divisor) as u8;
+
+ fractional_part %= divisor;
+ divisor /= 10;
+ pos += 1;
+ }
+
+ // If a precision < 9 was specified, there may be some non-zero
+ // digits left that weren't written into the buffer. In that case we
+ // need to perform rounding to match the semantics of printing
+ // normal floating point numbers. However, we only need to do work
+ // when rounding up. This happens if the first digit of the
+ // remaining ones is >= 5.
+ if fractional_part > 0 && fractional_part >= divisor * 5 {
+ // Round up the number contained in the buffer. We go through
+ // the buffer backwards and keep track of the carry.
+ let mut rev_pos = pos;
+ let mut carry = true;
+ while carry && rev_pos > 0 {
+ rev_pos -= 1;
+
+ // If the digit in the buffer is not '9', we just need to
+ // increment it and can stop then (since we don't have a
+ // carry anymore). Otherwise, we set it to '0' (overflow)
+ // and continue.
+ if buf[rev_pos] < b'9' {
+ buf[rev_pos] += 1;
+ carry = false;
+ } else {
+ buf[rev_pos] = b'0';
+ }
+ }
+
+ // If we still have the carry bit set, that means that we set
+ // the whole buffer to '0's and need to increment the integer
+ // part.
+ // NOTE(review): this unchecked `+= 1` would overflow for
+ // `integer_part == u64::MAX` — confirm whether callers can
+ // reach that case (secs == u64::MAX with rounding carry).
+ if carry {
+ integer_part += 1;
+ }
+ }
+
+ // Determine the end of the buffer: if precision is set, we just
+ // use as many digits from the buffer (capped to 9). If it isn't
+ // set, we only use all digits up to the last non-zero one.
+ let end = f.precision().map(|p| crate::cmp::min(p, 9)).unwrap_or(pos);
+
+ // If we haven't emitted a single fractional digit and the precision
+ // wasn't set to a non-zero value, we don't print the decimal point.
+ if end == 0 {
+ write!(f, "{}", integer_part)
+ } else {
+ // SAFETY: We are only writing ASCII digits into the buffer and it was
+ // initialized with '0's, so it contains valid UTF8.
+ let s = unsafe { crate::str::from_utf8_unchecked(&buf[..end]) };
+
+ // If the user request a precision > 9, we pad '0's at the end.
+ let w = f.precision().unwrap_or(pos);
+ write!(f, "{}.{:0<width$}", integer_part, s, width = w)
+ }
+ }
+
+ // Print leading '+' sign if requested
+ if f.sign_plus() {
+ write!(f, "+")?;
+ }
+
+ // Unit selection: largest unit in which the value is >= 1.
+ if self.secs > 0 {
+ fmt_decimal(f, self.secs, self.nanos, 100_000_000)?;
+ f.write_str("s")
+ } else if self.nanos >= 1_000_000 {
+ fmt_decimal(f, self.nanos as u64 / 1_000_000, self.nanos % 1_000_000, 100_000)?;
+ f.write_str("ms")
+ } else if self.nanos >= 1_000 {
+ fmt_decimal(f, self.nanos as u64 / 1_000, self.nanos % 1_000, 100)?;
+ f.write_str("µs")
+ } else {
+ fmt_decimal(f, self.nanos as u64, 0, 1)?;
+ f.write_str("ns")
+ }
+ }
+}
--- /dev/null
+// See src/libstd/primitive_docs.rs for documentation.
+
+use crate::cmp::Ordering::*;
+use crate::cmp::*;
+
+// macro for implementing n-ary tuple functions and operations
+// macro for implementing n-ary tuple functions and operations
+//
+// For each listed arity this generates PartialEq, Eq, PartialOrd, Ord and
+// Default impls for the tuple type `($T,)+`. The `last_type!(...): ?Sized`
+// bound relaxes the implicit `Sized` requirement for the final element only,
+// since only the last field of a tuple may be unsized.
+macro_rules! tuple_impls {
+ ($(
+ $Tuple:ident {
+ $(($idx:tt) -> $T:ident)+
+ }
+ )+) => {
+ $(
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:PartialEq),+> PartialEq for ($($T,)+) where last_type!($($T,)+): ?Sized {
+ #[inline]
+ fn eq(&self, other: &($($T,)+)) -> bool {
+ // Field-wise comparison, short-circuiting with `&&`.
+ $(self.$idx == other.$idx)&&+
+ }
+ #[inline]
+ fn ne(&self, other: &($($T,)+)) -> bool {
+ $(self.$idx != other.$idx)||+
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Eq),+> Eq for ($($T,)+) where last_type!($($T,)+): ?Sized {}
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:PartialOrd + PartialEq),+> PartialOrd for ($($T,)+)
+ where last_type!($($T,)+): ?Sized {
+ #[inline]
+ fn partial_cmp(&self, other: &($($T,)+)) -> Option<Ordering> {
+ lexical_partial_cmp!($(self.$idx, other.$idx),+)
+ }
+ // The four comparison operators are specialized via `lexical_ord!`
+ // rather than derived from `partial_cmp`.
+ #[inline]
+ fn lt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(lt, $(self.$idx, other.$idx),+)
+ }
+ #[inline]
+ fn le(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(le, $(self.$idx, other.$idx),+)
+ }
+ #[inline]
+ fn ge(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(ge, $(self.$idx, other.$idx),+)
+ }
+ #[inline]
+ fn gt(&self, other: &($($T,)+)) -> bool {
+ lexical_ord!(gt, $(self.$idx, other.$idx),+)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Ord),+> Ord for ($($T,)+) where last_type!($($T,)+): ?Sized {
+ #[inline]
+ fn cmp(&self, other: &($($T,)+)) -> Ordering {
+ lexical_cmp!($(self.$idx, other.$idx),+)
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ impl<$($T:Default),+> Default for ($($T,)+) {
+ #[inline]
+ fn default() -> ($($T,)+) {
+ ($({ let x: $T = Default::default(); x},)+)
+ }
+ }
+ )+
+ }
+}
+
+// Constructs an expression that performs a lexical ordering using method $rel.
+// The values are interleaved, so the macro invocation for
+// `(a1, a2, a3) < (b1, b2, b3)` would be `lexical_ord!(lt, a1, b1, a2, b2,
+// a3, b3)` (and similarly for `lexical_cmp`)
+macro_rules! lexical_ord {
+ ($rel: ident, $a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ // Earlier pairs decide the ordering as soon as they differ; equal pairs
+ // defer to the remaining ones.
+ if $a != $b { lexical_ord!($rel, $a, $b) }
+ else { lexical_ord!($rel, $($rest_a, $rest_b),+) }
+ };
+ ($rel: ident, $a:expr, $b:expr) => { ($a) . $rel (& $b) };
+}
+
+// Lexicographic `partial_cmp`: recurse into later pairs only while earlier
+// ones compare `Some(Equal)`.
+macro_rules! lexical_partial_cmp {
+ ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ match ($a).partial_cmp(&$b) {
+ Some(Equal) => lexical_partial_cmp!($($rest_a, $rest_b),+),
+ ordering => ordering
+ }
+ };
+ ($a:expr, $b:expr) => { ($a).partial_cmp(&$b) };
+}
+
+// Lexicographic total `cmp`: same shape as above but with `Ordering`.
+macro_rules! lexical_cmp {
+ ($a:expr, $b:expr, $($rest_a:expr, $rest_b:expr),+) => {
+ match ($a).cmp(&$b) {
+ Equal => lexical_cmp!($($rest_a, $rest_b),+),
+ ordering => ordering
+ }
+ };
+ ($a:expr, $b:expr) => { ($a).cmp(&$b) };
+}
+
+// Selects the last identifier from a comma-terminated list; used to apply the
+// `?Sized` relaxation to only the final tuple element.
+macro_rules! last_type {
+ ($a:ident,) => { $a };
+ ($a:ident, $($rest_a:ident,)+) => { last_type!($($rest_a,)+) };
+}
+
+// Instantiate the trait impls for tuple arities 1 through 12.
+tuple_impls! {
+ Tuple1 {
+ (0) -> A
+ }
+ Tuple2 {
+ (0) -> A
+ (1) -> B
+ }
+ Tuple3 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ }
+ Tuple4 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ }
+ Tuple5 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ }
+ Tuple6 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ }
+ Tuple7 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ }
+ Tuple8 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ (7) -> H
+ }
+ Tuple9 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ (7) -> H
+ (8) -> I
+ }
+ Tuple10 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ (7) -> H
+ (8) -> I
+ (9) -> J
+ }
+ Tuple11 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ (7) -> H
+ (8) -> I
+ (9) -> J
+ (10) -> K
+ }
+ Tuple12 {
+ (0) -> A
+ (1) -> B
+ (2) -> C
+ (3) -> D
+ (4) -> E
+ (5) -> F
+ (6) -> G
+ (7) -> H
+ (8) -> I
+ (9) -> J
+ (10) -> K
+ (11) -> L
+ }
+}
--- /dev/null
+#![unstable(feature = "unicode_internals", issue = "none")]
+#![allow(missing_docs)]
+
+pub(crate) mod printable;
+mod unicode_data;
+
+/// The version of [Unicode](http://www.unicode.org/) that the Unicode parts of
+/// `char` and `str` methods are based on.
+///
+/// New versions of Unicode are released regularly and subsequently all methods
+/// in the standard library depending on Unicode are updated. Therefore the
+/// behavior of some `char` and `str` methods and the value of this constant
+/// changes over time. This is *not* considered to be a breaking change.
+///
+/// The version numbering scheme is explained in
+/// [Unicode 11.0 or later, Section 3.1 Versions of the Unicode Standard](https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf#page=4).
+#[stable(feature = "unicode_version", since = "1.45.0")]
+pub const UNICODE_VERSION: (u8, u8, u8) = unicode_data::UNICODE_VERSION;
+
+// For use in liballoc, not re-exported in libstd.
+pub mod derived_property {
+ // Re-exports the two lookup functions liballoc needs for case handling.
+ pub use super::{Case_Ignorable, Cased};
+}
+
+// Per-property lookup functions generated into `unicode_data`, re-exported
+// under their Unicode property names (hence the non-snake-case aliases).
+pub use unicode_data::alphabetic::lookup as Alphabetic;
+pub use unicode_data::case_ignorable::lookup as Case_Ignorable;
+pub use unicode_data::cased::lookup as Cased;
+pub use unicode_data::cc::lookup as Cc;
+pub use unicode_data::conversions;
+pub use unicode_data::grapheme_extend::lookup as Grapheme_Extend;
+pub use unicode_data::lowercase::lookup as Lowercase;
+pub use unicode_data::n::lookup as N;
+pub use unicode_data::uppercase::lookup as Uppercase;
+pub use unicode_data::white_space::lookup as White_Space;
--- /dev/null
+#!/usr/bin/env python
+
+# This script uses the following Unicode tables:
+# - UnicodeData.txt
+
+
+from collections import namedtuple
+import csv
+import os
+import subprocess
+
+NUM_CODEPOINTS=0x110000
+
+def to_ranges(iter):
+ # Coalesces an ascending stream of codepoints into half-open (start, end)
+ # ranges. A new range is also forced at 0x10000 and 0x20000 so no range
+ # straddles the plane boundaries used by the table split in main().
+ current = None
+ for i in iter:
+ if current is None or i != current[1] or i in (0x10000, 0x20000):
+ if current is not None:
+ yield tuple(current)
+ current = [i, i + 1]
+ else:
+ current[1] += 1
+ if current is not None:
+ yield tuple(current)
+
+def get_escaped(codepoints):
+ # Yields the values of codepoints that should be escaped by Debug: those
+ # whose general category is a control/format/surrogate/private-use/
+ # unassigned/separator class (unassigned codepoints have class_ None,
+ # treated as "Cn") — except the ASCII space, which stays printable.
+ for c in codepoints:
+ if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '):
+ yield c.value
+
+def get_file(f):
+ # Opens the local copy of the file named by URL `f`, downloading it with
+ # curl into the current directory first if it is not already present.
+ try:
+ return open(os.path.basename(f))
+ except FileNotFoundError:
+ subprocess.run(["curl", "-O", f], check=True)
+ return open(os.path.basename(f))
+
+# A single codepoint: its integer value and its general-category string
+# (None for codepoints absent from UnicodeData.txt, i.e. unassigned).
+Codepoint = namedtuple('Codepoint', 'value class_')
+
+def get_codepoints(f):
+ # Parses UnicodeData.txt (semicolon-separated) into one Codepoint per value
+ # in [0, NUM_CODEPOINTS). UnicodeData.txt abbreviates large blocks as a
+ # "<..., First>" / "<..., Last>" row pair; the gap between them is expanded
+ # here with the First row's category. Gaps not covered by such a pair are
+ # emitted with class_ None.
+ r = csv.reader(f, delimiter=";")
+ prev_codepoint = 0
+ class_first = None
+ for row in r:
+ codepoint = int(row[0], 16)
+ name = row[1]
+ class_ = row[2]
+
+ if class_first is not None:
+ if not name.endswith("Last>"):
+ raise ValueError("Missing Last after First")
+
+ for c in range(prev_codepoint + 1, codepoint):
+ yield Codepoint(c, class_first)
+
+ class_first = None
+ if name.endswith("First>"):
+ class_first = class_
+
+ yield Codepoint(codepoint, class_)
+ prev_codepoint = codepoint
+
+ if class_first is not None:
+ raise ValueError("Missing Last after First")
+
+ # Trailing unassigned codepoints up to the end of the Unicode range.
+ for c in range(prev_codepoint + 1, NUM_CODEPOINTS):
+ yield Codepoint(c, None)
+
+def compress_singletons(singletons):
+ # Splits each 16-bit codepoint into its upper and lower byte. Consecutive
+ # entries sharing an upper byte are run-length encoded: `uppers` holds
+ # (upper_byte, count) pairs, `lowers` the corresponding lower bytes in
+ # order. Matches the layout consumed by check() in the generated Rust.
+ uppers = [] # (upper, # items in lowers)
+ lowers = []
+
+ for i in singletons:
+ upper = i >> 8
+ lower = i & 0xff
+ if len(uppers) == 0 or uppers[-1][0] != upper:
+ uppers.append((upper, 1))
+ else:
+ upper, count = uppers[-1]
+ uppers[-1] = upper, count + 1
+ lowers.append(lower)
+
+ return uppers, lowers
+
+def compress_normal(normal):
+ # lengths 0x00..0x7f are encoded as 00, 01, ..., 7e, 7f
+ # lengths 0x80..0x7fff are encoded as 80 80, 80 81, ..., ff fe, ff ff
+ #
+ # Each (start, count) range becomes a pair of lengths: the gap since the
+ # previous range ("truelen", codepoints that are printable) followed by the
+ # range length ("falselen", codepoints that are escaped). check() walks
+ # these alternating lengths, flipping a parity flag per length.
+ compressed = [] # [truelen, (truelenaux), falselen, (falselenaux)]
+
+ prev_start = 0
+ for start, count in normal:
+ truelen = start - prev_start
+ falselen = count
+ prev_start = start + count
+
+ # Both lengths must fit the 15-bit two-byte encoding described above.
+ assert truelen < 0x8000 and falselen < 0x8000
+ entry = []
+ if truelen > 0x7f:
+ entry.append(0x80 | (truelen >> 8))
+ entry.append(truelen & 0xff)
+ else:
+ entry.append(truelen & 0x7f)
+ if falselen > 0x7f:
+ entry.append(0x80 | (falselen >> 8))
+ entry.append(falselen & 0xff)
+ else:
+ entry.append(falselen & 0x7f)
+
+ compressed.append(entry)
+
+ return compressed
+
+def print_singletons(uppers, lowers, uppersname, lowersname):
+ # Emits the two singleton tables as Rust `const` slices on stdout.
+ print("#[rustfmt::skip]")
+ print("const {}: &[(u8, u8)] = &[".format(uppersname))
+ for u, c in uppers:
+ print(" ({:#04x}, {}),".format(u, c))
+ print("];")
+ print("#[rustfmt::skip]")
+ print("const {}: &[u8] = &[".format(lowersname))
+ # Lower bytes are wrapped eight to a line.
+ for i in range(0, len(lowers), 8):
+ print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8])))
+ print("];")
+
+def print_normal(normal, normalname):
+ # Emits a range table as a Rust `const` byte slice, one encoded
+ # length-pair entry per line.
+ print("#[rustfmt::skip]")
+ print("const {}: &[u8] = &[".format(normalname))
+ for v in normal:
+ print(" {}".format(" ".join("{:#04x},".format(i) for i in v)))
+ print("];")
+
+def main():
+ # Downloads/opens UnicodeData.txt, classifies every non-printable
+ # codepoint, compresses the result into lookup tables, and prints the
+ # complete generated Rust source (printable.rs) to stdout.
+ file = get_file("http://www.unicode.org/Public/UNIDATA/UnicodeData.txt")
+
+ codepoints = get_codepoints(file)
+
+ # Table split: plane 0 goes into the *0 tables, plane 1 (with the 0x10000
+ # bit masked off so values fit u16) into the *1 tables; everything at or
+ # above 0x20000 is emitted as explicit range checks ("extra").
+ CUTOFF=0x10000
+ singletons0 = []
+ singletons1 = []
+ normal0 = []
+ normal1 = []
+ extra = []
+
+ for a, b in to_ranges(get_escaped(codepoints)):
+ if a > 2 * CUTOFF:
+ extra.append((a, b - a))
+ elif a == b - 1:
+ if a & CUTOFF:
+ singletons1.append(a & ~CUTOFF)
+ else:
+ singletons0.append(a)
+ elif a == b - 2:
+ if a & CUTOFF:
+ singletons1.append(a & ~CUTOFF)
+ singletons1.append((a + 1) & ~CUTOFF)
+ else:
+ singletons0.append(a)
+ singletons0.append(a + 1)
+ else:
+ # NOTE(review): the singleton branches above use `a > 2 * CUTOFF`
+ # while this range branch uses `a >= 2 * CUTOFF`, so a 1- or
+ # 2-codepoint range starting exactly at 0x20000 would land in the
+ # u16 singleton tables — confirm that no such range occurs in
+ # practice (0x20000 is the start of CJK Extension B).
+ if a >= 2 * CUTOFF:
+ extra.append((a, b - a))
+ elif a & CUTOFF:
+ normal1.append((a & ~CUTOFF, b - a))
+ else:
+ normal0.append((a, b - a))
+
+ singletons0u, singletons0l = compress_singletons(singletons0)
+ singletons1u, singletons1l = compress_singletons(singletons1)
+ normal0 = compress_normal(normal0)
+ normal1 = compress_normal(normal1)
+
+ # Fixed preamble of the generated file: the check()/is_printable()
+ # functions that consume the tables printed below.
+ print("""\
+// NOTE: The following code was generated by "src/libcore/unicode/printable.py",
+// do not edit directly!
+
+fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
+ let xupper = (x >> 8) as u8;
+ let mut lowerstart = 0;
+ for &(upper, lowercount) in singletonuppers {
+ let lowerend = lowerstart + lowercount as usize;
+ if xupper == upper {
+ for &lower in &singletonlowers[lowerstart..lowerend] {
+ if lower == x as u8 {
+ return false;
+ }
+ }
+ } else if xupper < upper {
+ break;
+ }
+ lowerstart = lowerend;
+ }
+
+ let mut x = x as i32;
+ let mut normal = normal.iter().cloned();
+ let mut current = true;
+ while let Some(v) = normal.next() {
+ let len = if v & 0x80 != 0 {
+ ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
+ } else {
+ v as i32
+ };
+ x -= len;
+ if x < 0 {
+ break;
+ }
+ current = !current;
+ }
+ current
+}
+
+pub(crate) fn is_printable(x: char) -> bool {
+ let x = x as u32;
+ let lower = x as u16;
+ if x < 0x10000 {
+ check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
+ } else if x < 0x20000 {
+ check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
+ } else {\
+""")
+ # One explicit range check per "extra" range above plane 1.
+ for a, b in extra:
+ print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b))
+ print(" return false;")
+ print(" }")
+ print("""\
+ true
+ }
+}\
+""")
+ print()
+ print_singletons(singletons0u, singletons0l, 'SINGLETONS0U', 'SINGLETONS0L')
+ print_singletons(singletons1u, singletons1l, 'SINGLETONS1U', 'SINGLETONS1L')
+ print_normal(normal0, 'NORMAL0')
+ print_normal(normal1, 'NORMAL1')
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+// NOTE: The following code was generated by "src/libcore/unicode/printable.py",
+// do not edit directly!
+
+// Two-stage lookup, mirroring the encoding in printable.py:
+// 1. scan the singleton tables (upper byte run-length encoded over the lower
+//    bytes) — a hit means the codepoint is NOT printable;
+// 2. walk `normal` as alternating printable/non-printable run lengths
+//    (1 byte if < 0x80, else 2 bytes with the high bit set), flipping the
+//    parity flag until the remaining offset goes negative.
+fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
+ let xupper = (x >> 8) as u8;
+ let mut lowerstart = 0;
+ for &(upper, lowercount) in singletonuppers {
+ let lowerend = lowerstart + lowercount as usize;
+ if xupper == upper {
+ for &lower in &singletonlowers[lowerstart..lowerend] {
+ if lower == x as u8 {
+ return false;
+ }
+ }
+ } else if xupper < upper {
+ // Uppers are sorted ascending; past `xupper` there can be no match.
+ break;
+ }
+ lowerstart = lowerend;
+ }
+
+ let mut x = x as i32;
+ let mut normal = normal.iter().cloned();
+ let mut current = true;
+ while let Some(v) = normal.next() {
+ let len = if v & 0x80 != 0 {
+ ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
+ } else {
+ v as i32
+ };
+ x -= len;
+ if x < 0 {
+ break;
+ }
+ current = !current;
+ }
+ current
+}
+
+// Plane 0 and plane 1 are answered from the compressed tables (the low 16
+// bits suffice since the tables are split at 0x10000); codepoints at or above
+// 0x20000 use the explicit range checks below and default to printable.
+pub(crate) fn is_printable(x: char) -> bool {
+ let x = x as u32;
+ let lower = x as u16;
+ if x < 0x10000 {
+ check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
+ } else if x < 0x20000 {
+ check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
+ } else {
+ if 0x2a6de <= x && x < 0x2a700 {
+ return false;
+ }
+ if 0x2b735 <= x && x < 0x2b740 {
+ return false;
+ }
+ if 0x2b81e <= x && x < 0x2b820 {
+ return false;
+ }
+ if 0x2cea2 <= x && x < 0x2ceb0 {
+ return false;
+ }
+ if 0x2ebe1 <= x && x < 0x2f800 {
+ return false;
+ }
+ if 0x2fa1e <= x && x < 0x30000 {
+ return false;
+ }
+ if 0x3134b <= x && x < 0xe0100 {
+ return false;
+ }
+ if 0xe01f0 <= x && x < 0x110000 {
+ return false;
+ }
+ true
+ }
+}
+
+#[rustfmt::skip]
+const SINGLETONS0U: &[(u8, u8)] = &[
+ (0x00, 1),
+ (0x03, 5),
+ (0x05, 6),
+ (0x06, 3),
+ (0x07, 6),
+ (0x08, 8),
+ (0x09, 17),
+ (0x0a, 28),
+ (0x0b, 25),
+ (0x0c, 20),
+ (0x0d, 16),
+ (0x0e, 13),
+ (0x0f, 4),
+ (0x10, 3),
+ (0x12, 18),
+ (0x13, 9),
+ (0x16, 1),
+ (0x17, 5),
+ (0x18, 2),
+ (0x19, 3),
+ (0x1a, 7),
+ (0x1c, 2),
+ (0x1d, 1),
+ (0x1f, 22),
+ (0x20, 3),
+ (0x2b, 3),
+ (0x2c, 2),
+ (0x2d, 11),
+ (0x2e, 1),
+ (0x30, 3),
+ (0x31, 2),
+ (0x32, 1),
+ (0xa7, 2),
+ (0xa9, 2),
+ (0xaa, 4),
+ (0xab, 8),
+ (0xfa, 2),
+ (0xfb, 5),
+ (0xfd, 4),
+ (0xfe, 3),
+ (0xff, 9),
+];
+#[rustfmt::skip]
+const SINGLETONS0L: &[u8] = &[
+ 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57,
+ 0x58, 0x8b, 0x8c, 0x90, 0x1c, 0x1d, 0xdd, 0x0e,
+ 0x0f, 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f,
+ 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e,
+ 0x91, 0x92, 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6,
+ 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04,
+ 0x11, 0x12, 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b,
+ 0x3d, 0x49, 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9,
+ 0xb1, 0xb4, 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf,
+ 0xe4, 0xe5, 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12,
+ 0x29, 0x31, 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49,
+ 0x4a, 0x5e, 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d,
+ 0xc9, 0xce, 0xcf, 0x0d, 0x11, 0x29, 0x45, 0x49,
+ 0x57, 0x64, 0x65, 0x8d, 0x91, 0xa9, 0xb4, 0xba,
+ 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x0d,
+ 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x84, 0xb2,
+ 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, 0xf1, 0x83,
+ 0x85, 0x8b, 0xa4, 0xa6, 0xbe, 0xbf, 0xc5, 0xc7,
+ 0xce, 0xcf, 0xda, 0xdb, 0x48, 0x98, 0xbd, 0xcd,
+ 0xc6, 0xce, 0xcf, 0x49, 0x4e, 0x4f, 0x57, 0x59,
+ 0x5e, 0x5f, 0x89, 0x8e, 0x8f, 0xb1, 0xb6, 0xb7,
+ 0xbf, 0xc1, 0xc6, 0xc7, 0xd7, 0x11, 0x16, 0x17,
+ 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, 0xff, 0x80, 0x0d,
+ 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x0f, 0x1f, 0x6e,
+ 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, 0xae, 0xaf,
+ 0xbb, 0xbc, 0xfa, 0x16, 0x17, 0x1e, 0x1f, 0x46,
+ 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, 0x7e,
+ 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, 0xf1,
+ 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96, 0x2f,
+ 0x5f, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf,
+ 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98,
+ 0x30, 0x8f, 0x1f, 0xc0, 0xc1, 0xce, 0xff, 0x4e,
+ 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27,
+ 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f,
+ 0x42, 0x45, 0x90, 0x91, 0xfe, 0xff, 0x53, 0x67,
+ 0x75, 0xc8, 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7,
+ 0xfe, 0xff,
+];
+#[rustfmt::skip]
+const SINGLETONS1U: &[(u8, u8)] = &[
+ (0x00, 6),
+ (0x01, 1),
+ (0x03, 1),
+ (0x04, 2),
+ (0x08, 8),
+ (0x09, 2),
+ (0x0a, 5),
+ (0x0b, 2),
+ (0x0e, 4),
+ (0x10, 1),
+ (0x11, 2),
+ (0x12, 5),
+ (0x13, 17),
+ (0x14, 1),
+ (0x15, 2),
+ (0x17, 2),
+ (0x19, 13),
+ (0x1c, 5),
+ (0x1d, 8),
+ (0x24, 1),
+ (0x6a, 3),
+ (0x6b, 2),
+ (0xbc, 2),
+ (0xd1, 2),
+ (0xd4, 12),
+ (0xd5, 9),
+ (0xd6, 2),
+ (0xd7, 2),
+ (0xda, 1),
+ (0xe0, 5),
+ (0xe1, 2),
+ (0xe8, 2),
+ (0xee, 32),
+ (0xf0, 4),
+ (0xf8, 2),
+ (0xf9, 2),
+ (0xfa, 2),
+ (0xfb, 1),
+];
+#[rustfmt::skip]
+const SINGLETONS1L: &[u8] = &[
+ 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e,
+ 0x9e, 0x9f, 0x06, 0x07, 0x09, 0x36, 0x3d, 0x3e,
+ 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x36,
+ 0x37, 0x56, 0x57, 0x7f, 0xaa, 0xae, 0xaf, 0xbd,
+ 0x35, 0xe0, 0x12, 0x87, 0x89, 0x8e, 0x9e, 0x04,
+ 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a,
+ 0x45, 0x46, 0x49, 0x4a, 0x4e, 0x4f, 0x64, 0x65,
+ 0x5c, 0xb6, 0xb7, 0x1b, 0x1c, 0x07, 0x08, 0x0a,
+ 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8, 0xa9,
+ 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8, 0x07,
+ 0x0a, 0x3b, 0x3e, 0x66, 0x69, 0x8f, 0x92, 0x6f,
+ 0x5f, 0xee, 0xef, 0x5a, 0x62, 0x9a, 0x9b, 0x27,
+ 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7,
+ 0xa8, 0xad, 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c,
+ 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7,
+ 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25,
+ 0x3e, 0x3f, 0xc5, 0xc6, 0x04, 0x20, 0x23, 0x25,
+ 0x26, 0x28, 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c,
+ 0x50, 0x53, 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
+ 0x60, 0x63, 0x65, 0x66, 0x6b, 0x73, 0x78, 0x7d,
+ 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0,
+ 0xae, 0xaf, 0x79, 0xcc, 0x6e, 0x6f, 0x93,
+];
+#[rustfmt::skip]
+const NORMAL0: &[u8] = &[
+ 0x00, 0x20,
+ 0x5f, 0x22,
+ 0x82, 0xdf, 0x04,
+ 0x82, 0x44, 0x08,
+ 0x1b, 0x04,
+ 0x06, 0x11,
+ 0x81, 0xac, 0x0e,
+ 0x80, 0xab, 0x35,
+ 0x28, 0x0b,
+ 0x80, 0xe0, 0x03,
+ 0x19, 0x08,
+ 0x01, 0x04,
+ 0x2f, 0x04,
+ 0x34, 0x04,
+ 0x07, 0x03,
+ 0x01, 0x07,
+ 0x06, 0x07,
+ 0x11, 0x0a,
+ 0x50, 0x0f,
+ 0x12, 0x07,
+ 0x55, 0x07,
+ 0x03, 0x04,
+ 0x1c, 0x0a,
+ 0x09, 0x03,
+ 0x08, 0x03,
+ 0x07, 0x03,
+ 0x02, 0x03,
+ 0x03, 0x03,
+ 0x0c, 0x04,
+ 0x05, 0x03,
+ 0x0b, 0x06,
+ 0x01, 0x0e,
+ 0x15, 0x05,
+ 0x3a, 0x03,
+ 0x11, 0x07,
+ 0x06, 0x05,
+ 0x10, 0x07,
+ 0x57, 0x07,
+ 0x02, 0x07,
+ 0x15, 0x0d,
+ 0x50, 0x04,
+ 0x43, 0x03,
+ 0x2d, 0x03,
+ 0x01, 0x04,
+ 0x11, 0x06,
+ 0x0f, 0x0c,
+ 0x3a, 0x04,
+ 0x1d, 0x25,
+ 0x5f, 0x20,
+ 0x6d, 0x04,
+ 0x6a, 0x25,
+ 0x80, 0xc8, 0x05,
+ 0x82, 0xb0, 0x03,
+ 0x1a, 0x06,
+ 0x82, 0xfd, 0x03,
+ 0x59, 0x07,
+ 0x15, 0x0b,
+ 0x17, 0x09,
+ 0x14, 0x0c,
+ 0x14, 0x0c,
+ 0x6a, 0x06,
+ 0x0a, 0x06,
+ 0x1a, 0x06,
+ 0x59, 0x07,
+ 0x2b, 0x05,
+ 0x46, 0x0a,
+ 0x2c, 0x04,
+ 0x0c, 0x04,
+ 0x01, 0x03,
+ 0x31, 0x0b,
+ 0x2c, 0x04,
+ 0x1a, 0x06,
+ 0x0b, 0x03,
+ 0x80, 0xac, 0x06,
+ 0x0a, 0x06,
+ 0x21, 0x3f,
+ 0x4c, 0x04,
+ 0x2d, 0x03,
+ 0x74, 0x08,
+ 0x3c, 0x03,
+ 0x0f, 0x03,
+ 0x3c, 0x07,
+ 0x38, 0x08,
+ 0x2b, 0x05,
+ 0x82, 0xff, 0x11,
+ 0x18, 0x08,
+ 0x2f, 0x11,
+ 0x2d, 0x03,
+ 0x20, 0x10,
+ 0x21, 0x0f,
+ 0x80, 0x8c, 0x04,
+ 0x82, 0x97, 0x19,
+ 0x0b, 0x15,
+ 0x88, 0x94, 0x05,
+ 0x2f, 0x05,
+ 0x3b, 0x07,
+ 0x02, 0x0e,
+ 0x18, 0x09,
+ 0x80, 0xb3, 0x2d,
+ 0x74, 0x0c,
+ 0x80, 0xd6, 0x1a,
+ 0x0c, 0x05,
+ 0x80, 0xff, 0x05,
+ 0x80, 0xdf, 0x0c,
+ 0xee, 0x0d, 0x03,
+ 0x84, 0x8d, 0x03,
+ 0x37, 0x09,
+ 0x81, 0x5c, 0x14,
+ 0x80, 0xb8, 0x08,
+ 0x80, 0xcb, 0x2a,
+ 0x38, 0x03,
+ 0x0a, 0x06,
+ 0x38, 0x08,
+ 0x46, 0x08,
+ 0x0c, 0x06,
+ 0x74, 0x0b,
+ 0x1e, 0x03,
+ 0x5a, 0x04,
+ 0x59, 0x09,
+ 0x80, 0x83, 0x18,
+ 0x1c, 0x0a,
+ 0x16, 0x09,
+ 0x4c, 0x04,
+ 0x80, 0x8a, 0x06,
+ 0xab, 0xa4, 0x0c,
+ 0x17, 0x04,
+ 0x31, 0xa1, 0x04,
+ 0x81, 0xda, 0x26,
+ 0x07, 0x0c,
+ 0x05, 0x05,
+ 0x80, 0xa5, 0x11,
+ 0x81, 0x6d, 0x10,
+ 0x78, 0x28,
+ 0x2a, 0x06,
+ 0x4c, 0x04,
+ 0x80, 0x8d, 0x04,
+ 0x80, 0xbe, 0x03,
+ 0x1b, 0x03,
+ 0x0f, 0x0d,
+];
+#[rustfmt::skip]
+const NORMAL1: &[u8] = &[
+ 0x5e, 0x22,
+ 0x7b, 0x05,
+ 0x03, 0x04,
+ 0x2d, 0x03,
+ 0x66, 0x03,
+ 0x01, 0x2f,
+ 0x2e, 0x80, 0x82,
+ 0x1d, 0x03,
+ 0x31, 0x0f,
+ 0x1c, 0x04,
+ 0x24, 0x09,
+ 0x1e, 0x05,
+ 0x2b, 0x05,
+ 0x44, 0x04,
+ 0x0e, 0x2a,
+ 0x80, 0xaa, 0x06,
+ 0x24, 0x04,
+ 0x24, 0x04,
+ 0x28, 0x08,
+ 0x34, 0x0b,
+ 0x01, 0x80, 0x90,
+ 0x81, 0x37, 0x09,
+ 0x16, 0x0a,
+ 0x08, 0x80, 0x98,
+ 0x39, 0x03,
+ 0x63, 0x08,
+ 0x09, 0x30,
+ 0x16, 0x05,
+ 0x21, 0x03,
+ 0x1b, 0x05,
+ 0x01, 0x40,
+ 0x38, 0x04,
+ 0x4b, 0x05,
+ 0x2f, 0x04,
+ 0x0a, 0x07,
+ 0x09, 0x07,
+ 0x40, 0x20,
+ 0x27, 0x04,
+ 0x0c, 0x09,
+ 0x36, 0x03,
+ 0x3a, 0x05,
+ 0x1a, 0x07,
+ 0x04, 0x0c,
+ 0x07, 0x50,
+ 0x49, 0x37,
+ 0x33, 0x0d,
+ 0x33, 0x07,
+ 0x2e, 0x08,
+ 0x0a, 0x81, 0x26,
+ 0x52, 0x4e,
+ 0x28, 0x08,
+ 0x2a, 0x56,
+ 0x1c, 0x14,
+ 0x17, 0x09,
+ 0x4e, 0x04,
+ 0x1e, 0x0f,
+ 0x43, 0x0e,
+ 0x19, 0x07,
+ 0x0a, 0x06,
+ 0x48, 0x08,
+ 0x27, 0x09,
+ 0x75, 0x0b,
+ 0x3f, 0x41,
+ 0x2a, 0x06,
+ 0x3b, 0x05,
+ 0x0a, 0x06,
+ 0x51, 0x06,
+ 0x01, 0x05,
+ 0x10, 0x03,
+ 0x05, 0x80, 0x8b,
+ 0x62, 0x1e,
+ 0x48, 0x08,
+ 0x0a, 0x80, 0xa6,
+ 0x5e, 0x22,
+ 0x45, 0x0b,
+ 0x0a, 0x06,
+ 0x0d, 0x13,
+ 0x39, 0x07,
+ 0x0a, 0x36,
+ 0x2c, 0x04,
+ 0x10, 0x80, 0xc0,
+ 0x3c, 0x64,
+ 0x53, 0x0c,
+ 0x48, 0x09,
+ 0x0a, 0x46,
+ 0x45, 0x1b,
+ 0x48, 0x08,
+ 0x53, 0x1d,
+ 0x39, 0x81, 0x07,
+ 0x46, 0x0a,
+ 0x1d, 0x03,
+ 0x47, 0x49,
+ 0x37, 0x03,
+ 0x0e, 0x08,
+ 0x0a, 0x06,
+ 0x39, 0x07,
+ 0x0a, 0x81, 0x36,
+ 0x19, 0x80, 0xb7,
+ 0x01, 0x0f,
+ 0x32, 0x0d,
+ 0x83, 0x9b, 0x66,
+ 0x75, 0x0b,
+ 0x80, 0xc4, 0x8a, 0xbc,
+ 0x84, 0x2f, 0x8f, 0xd1,
+ 0x82, 0x47, 0xa1, 0xb9,
+ 0x82, 0x39, 0x07,
+ 0x2a, 0x04,
+ 0x02, 0x60,
+ 0x26, 0x0a,
+ 0x46, 0x0a,
+ 0x28, 0x05,
+ 0x13, 0x82, 0xb0,
+ 0x5b, 0x65,
+ 0x4b, 0x04,
+ 0x39, 0x07,
+ 0x11, 0x40,
+ 0x05, 0x0b,
+ 0x02, 0x0e,
+ 0x97, 0xf8, 0x08,
+ 0x84, 0xd6, 0x2a,
+ 0x09, 0xa2, 0xf7,
+ 0x81, 0x1f, 0x31,
+ 0x03, 0x11,
+ 0x04, 0x08,
+ 0x81, 0x8c, 0x89, 0x04,
+ 0x6b, 0x05,
+ 0x0d, 0x03,
+ 0x09, 0x07,
+ 0x10, 0x93, 0x60,
+ 0x80, 0xf6, 0x0a,
+ 0x73, 0x08,
+ 0x6e, 0x17,
+ 0x46, 0x80, 0x9a,
+ 0x14, 0x0c,
+ 0x57, 0x09,
+ 0x19, 0x80, 0x87,
+ 0x81, 0x47, 0x03,
+ 0x85, 0x42, 0x0f,
+ 0x15, 0x85, 0x50,
+ 0x2b, 0x80, 0xd5,
+ 0x2d, 0x03,
+ 0x1a, 0x04,
+ 0x02, 0x81, 0x70,
+ 0x3a, 0x05,
+ 0x01, 0x85, 0x00,
+ 0x80, 0xd7, 0x29,
+ 0x4c, 0x04,
+ 0x0a, 0x04,
+ 0x02, 0x83, 0x11,
+ 0x44, 0x4c,
+ 0x3d, 0x80, 0xc2,
+ 0x3c, 0x06,
+ 0x01, 0x04,
+ 0x55, 0x05,
+ 0x1b, 0x34,
+ 0x02, 0x81, 0x0e,
+ 0x2c, 0x04,
+ 0x64, 0x0c,
+ 0x56, 0x0a,
+ 0x80, 0xae, 0x38,
+ 0x1d, 0x0d,
+ 0x2c, 0x04,
+ 0x09, 0x07,
+ 0x02, 0x0e,
+ 0x06, 0x80, 0x9a,
+ 0x83, 0xd8, 0x08,
+ 0x0d, 0x03,
+ 0x0d, 0x03,
+ 0x74, 0x0c,
+ 0x59, 0x07,
+ 0x0c, 0x14,
+ 0x0c, 0x04,
+ 0x38, 0x08,
+ 0x0a, 0x06,
+ 0x28, 0x08,
+ 0x22, 0x4e,
+ 0x81, 0x54, 0x0c,
+ 0x15, 0x03,
+ 0x03, 0x05,
+ 0x07, 0x09,
+ 0x19, 0x07,
+ 0x07, 0x09,
+ 0x03, 0x0d,
+ 0x07, 0x29,
+ 0x80, 0xcb, 0x25,
+ 0x0a, 0x84, 0x06,
+];
--- /dev/null
+//! This file is generated by src/tools/unicode-table-generator; do not edit manually!
+
+/// Membership test against a multi-level compressed bitset.
+///
+/// The set stores one bit per codepoint, grouped into 64-bit words.
+/// `chunk_idx_map` and `bitset_chunk_idx` form a two-level index mapping a
+/// word number to an entry in the deduplicated word storage. Indices past the
+/// end of `bitset_canonical` refer into `bitset_canonicalized`, which rebuilds
+/// a word by transforming (invert / shift / rotate) a canonical one.
+#[inline(always)]
+fn bitset_search<
+ const N: usize,
+ const CHUNK_SIZE: usize,
+ const N1: usize,
+ const CANONICAL: usize,
+ const CANONICALIZED: usize,
+>(
+ needle: u32,
+ chunk_idx_map: &[u8; N],
+ bitset_chunk_idx: &[[u8; CHUNK_SIZE]; N1],
+ bitset_canonical: &[u64; CANONICAL],
+ bitset_canonicalized: &[(u8, u8); CANONICALIZED],
+) -> bool {
+ // Which 64-bit word the needle's bit lives in.
+ let bucket_idx = (needle / 64) as usize;
+ let chunk_map_idx = bucket_idx / CHUNK_SIZE;
+ let chunk_piece = bucket_idx % CHUNK_SIZE;
+ // Needles beyond the indexed range are simply not in the set.
+ let chunk_idx = if let Some(&v) = chunk_idx_map.get(chunk_map_idx) {
+ v
+ } else {
+ return false;
+ };
+ let idx = bitset_chunk_idx[chunk_idx as usize][chunk_piece] as usize;
+ let word = if let Some(word) = bitset_canonical.get(idx) {
+ *word
+ } else {
+ // `idx` points past the canonical words: reconstruct the word from a
+ // (source index, transform) pair. Bit 6 of the transform inverts the
+ // source word; bit 7 selects shift-right (set) vs. rotate-left
+ // (clear) by the amount in the low 6 bits.
+ let (real_idx, mapping) = bitset_canonicalized[idx - bitset_canonical.len()];
+ let mut word = bitset_canonical[real_idx as usize];
+ let should_invert = mapping & (1 << 6) != 0;
+ if should_invert {
+ word = !word;
+ }
+ // Lower 6 bits
+ let quantity = mapping & ((1 << 6) - 1);
+ if mapping & (1 << 7) != 0 {
+ // shift
+ word >>= quantity as u64;
+ } else {
+ word = word.rotate_left(quantity as u32);
+ }
+ word
+ };
+ // Test the needle's bit within the selected word.
+ (word & (1 << (needle % 64) as u64)) != 0
+}
+
+/// Extracts the low 21 bits of a `SHORT_OFFSET_RUNS` header: the codepoint
+/// prefix sum at which the run begins (21 bits are enough for `char::MAX`).
+fn decode_prefix_sum(short_offset_run_header: u32) -> u32 {
+ short_offset_run_header & ((1 << 21) - 1)
+}
+
+/// Extracts the high 11 bits of a `SHORT_OFFSET_RUNS` header; `skip_search`
+/// uses this value as the run's starting index into the `OFFSETS` array.
+fn decode_length(short_offset_run_header: u32) -> usize {
+ (short_offset_run_header >> 21) as usize
+}
+
+/// Membership test against run-length-encoded property data.
+///
+/// Each `short_offset_runs` header packs a 21-bit codepoint prefix sum (low
+/// bits) and an index into `offsets` (high bits); see `decode_prefix_sum` and
+/// `decode_length`. After locating the needle's run by binary search, the
+/// `offsets` entries are walked as alternating out-of-set / in-set span
+/// lengths; the parity of the final index decides membership.
+#[inline(always)]
+fn skip_search<const SOR: usize, const OFFSETS: usize>(
+ needle: u32,
+ short_offset_runs: &[u32; SOR],
+ offsets: &[u8; OFFSETS],
+) -> bool {
+ // Note that this *cannot* be past the end of the array, as the last
+ // element is greater than std::char::MAX (the largest possible needle).
+ //
+ // So, we cannot have found it (i.e. Ok(idx) + 1 != length) and the correct
+ // location cannot be past it, so Err(idx) != length either.
+ //
+ // This means that we can avoid bounds checking for the accesses below, too.
+ //
+ // Shifting left by 11 drops the high (length) field so only the 21-bit
+ // prefix-sum fields are compared.
+ let last_idx =
+ match short_offset_runs.binary_search_by_key(&(needle << 11), |header| header << 11) {
+ Ok(idx) => idx + 1,
+ Err(idx) => idx,
+ };
+
+ let mut offset_idx = decode_length(short_offset_runs[last_idx]);
+ let length = if let Some(next) = short_offset_runs.get(last_idx + 1) {
+ decode_length(*next) - offset_idx
+ } else {
+ offsets.len() - offset_idx
+ };
+ let prev =
+ last_idx.checked_sub(1).map(|prev| decode_prefix_sum(short_offset_runs[prev])).unwrap_or(0);
+
+ // How far the needle sits inside this run's region of codepoints.
+ let total = needle - prev;
+ let mut prefix_sum = 0;
+ for _ in 0..(length - 1) {
+ let offset = offsets[offset_idx];
+ prefix_sum += offset as u32;
+ if prefix_sum > total {
+ break;
+ }
+ offset_idx += 1;
+ }
+ // Spans alternate out-of-set / in-set; odd parity means the needle
+ // landed inside an in-set span.
+ offset_idx % 2 == 1
+}
+
+/// The version of the Unicode Character Database these tables were generated from.
+pub const UNICODE_VERSION: (u8, u8, u8) = (13, 0, 0);
+
+#[rustfmt::skip]
+pub mod alphabetic {
+ static SHORT_OFFSET_RUNS: [u32; 52] = [
+ 706, 33559113, 868226669, 947920662, 1157637302, 1306536960, 1310732293, 1398813696,
+ 1449151936, 1451270141, 1455465613, 1459660301, 1468061604, 1648425216, 1658911342,
+ 1661009214, 1707147904, 1793132343, 1853951616, 1994464256, 2330009312, 2418090906,
+ 2428579840, 2439066671, 2441167872, 2443265607, 2445371392, 2447469113, 2449567296,
+ 2476836856, 2508295382, 2512498688, 2518790431, 2520888060, 2533473280, 2535576576,
+ 2556548774, 2634145792, 2682380992, 2715936768, 2720132608, 2736910640, 2875326464,
+ 2887952094, 2890053429, 2894253730, 2902649825, 2906847232, 2908944926, 2911043584,
+ 2913145675, 2916356939,
+ ];
+ static OFFSETS: [u8; 1391] = [
+ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 0, 4, 12, 14, 5, 7, 1, 1, 1, 86, 1, 42,
+ 5, 1, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 2, 1, 6, 41, 39,
+ 14, 1, 1, 1, 2, 1, 2, 1, 1, 8, 27, 4, 4, 29, 11, 5, 56, 1, 7, 14, 102, 1, 8, 4, 8, 4, 3, 10,
+ 3, 2, 1, 16, 48, 13, 101, 24, 33, 9, 2, 4, 1, 5, 24, 2, 19, 19, 25, 7, 11, 53, 21, 1, 18,
+ 12, 12, 3, 7, 6, 76, 1, 16, 1, 3, 4, 15, 13, 19, 1, 8, 2, 2, 2, 22, 1, 7, 1, 1, 3, 4, 3, 8,
+ 2, 2, 2, 2, 1, 1, 8, 1, 4, 2, 1, 5, 12, 2, 10, 1, 4, 3, 1, 6, 4, 2, 2, 22, 1, 7, 1, 2, 1, 2,
+ 1, 2, 4, 5, 4, 2, 2, 2, 4, 1, 7, 4, 1, 1, 17, 6, 11, 3, 1, 9, 1, 3, 1, 22, 1, 7, 1, 2, 1, 5,
+ 3, 9, 1, 3, 1, 2, 3, 1, 15, 4, 21, 4, 4, 3, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2,
+ 2, 2, 9, 2, 4, 2, 1, 5, 13, 1, 16, 2, 1, 6, 3, 3, 1, 4, 3, 2, 1, 1, 1, 2, 3, 2, 3, 3, 3, 12,
+ 4, 5, 3, 3, 1, 3, 3, 1, 6, 1, 40, 4, 1, 8, 1, 3, 1, 23, 1, 16, 3, 8, 1, 3, 1, 3, 8, 2, 1, 3,
+ 5, 4, 28, 4, 1, 8, 1, 3, 1, 23, 1, 10, 1, 5, 3, 8, 1, 3, 1, 3, 8, 2, 7, 1, 1, 4, 13, 2, 13,
+ 13, 1, 3, 1, 41, 2, 8, 1, 3, 1, 3, 1, 1, 5, 4, 7, 5, 22, 6, 1, 3, 1, 18, 3, 24, 1, 9, 1, 1,
+ 2, 7, 8, 6, 1, 1, 1, 8, 18, 2, 13, 58, 5, 7, 6, 1, 51, 2, 1, 1, 1, 5, 1, 24, 1, 1, 1, 19, 1,
+ 3, 2, 5, 1, 1, 6, 1, 14, 4, 32, 1, 63, 8, 1, 36, 4, 17, 6, 16, 1, 36, 67, 55, 1, 1, 2, 5,
+ 16, 64, 10, 4, 2, 38, 1, 1, 5, 1, 2, 43, 1, 0, 1, 4, 2, 7, 1, 1, 1, 4, 2, 41, 1, 4, 2, 33,
+ 1, 4, 2, 7, 1, 1, 1, 4, 2, 15, 1, 57, 1, 4, 2, 67, 37, 16, 16, 86, 2, 6, 3, 0, 2, 17, 1, 26,
+ 5, 75, 3, 11, 7, 13, 1, 6, 12, 20, 12, 20, 12, 13, 1, 3, 1, 2, 12, 52, 2, 19, 14, 1, 4, 1,
+ 67, 89, 7, 43, 5, 70, 10, 31, 1, 12, 4, 9, 23, 30, 2, 5, 11, 44, 4, 26, 54, 28, 4, 63, 2,
+ 20, 50, 1, 23, 2, 63, 52, 1, 15, 1, 7, 52, 42, 2, 4, 10, 44, 1, 11, 14, 55, 22, 3, 10, 36,
+ 2, 9, 7, 43, 2, 3, 41, 4, 1, 6, 1, 2, 3, 1, 5, 192, 39, 14, 11, 0, 2, 6, 2, 38, 2, 6, 2, 8,
+ 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13, 5, 3, 1, 7, 116,
+ 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4, 1, 11, 2, 4, 5,
+ 5, 4, 1, 17, 41, 0, 52, 0, 47, 1, 47, 1, 133, 6, 4, 3, 2, 12, 38, 1, 1, 5, 1, 2, 56, 7, 1,
+ 16, 23, 9, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 32, 47, 1, 0, 3, 25, 9, 7, 5, 2,
+ 5, 4, 86, 6, 3, 1, 90, 1, 4, 5, 43, 1, 94, 17, 32, 48, 16, 0, 0, 64, 0, 3, 0, 67, 46, 2, 0,
+ 3, 16, 10, 2, 20, 47, 5, 8, 3, 113, 39, 9, 2, 103, 2, 53, 2, 9, 42, 17, 1, 33, 24, 52, 12,
+ 68, 1, 1, 44, 6, 3, 1, 1, 3, 10, 33, 5, 35, 13, 29, 3, 51, 1, 12, 15, 1, 16, 16, 10, 5, 1,
+ 55, 9, 14, 18, 23, 3, 69, 1, 1, 1, 1, 24, 3, 2, 16, 2, 4, 11, 6, 2, 6, 2, 6, 9, 7, 1, 7, 1,
+ 43, 1, 14, 6, 123, 21, 0, 12, 23, 4, 49, 0, 0, 2, 106, 38, 7, 12, 5, 5, 12, 1, 13, 1, 5, 1,
+ 1, 1, 2, 1, 2, 1, 108, 33, 0, 18, 64, 2, 54, 40, 12, 116, 5, 1, 135, 36, 26, 6, 26, 11, 89,
+ 3, 6, 2, 6, 2, 6, 2, 3, 35, 12, 1, 26, 1, 19, 1, 2, 1, 15, 2, 14, 34, 123, 69, 53, 0, 29, 3,
+ 49, 47, 32, 13, 30, 5, 43, 5, 30, 2, 36, 4, 8, 1, 5, 42, 158, 18, 36, 4, 36, 4, 40, 8, 52,
+ 156, 0, 9, 22, 10, 8, 152, 6, 2, 1, 1, 44, 1, 2, 3, 1, 2, 23, 10, 23, 9, 31, 65, 19, 1, 2,
+ 10, 22, 10, 26, 70, 56, 6, 2, 64, 4, 1, 2, 5, 8, 1, 3, 1, 29, 42, 29, 3, 29, 35, 8, 1, 28,
+ 27, 54, 10, 22, 10, 19, 13, 18, 110, 73, 55, 51, 13, 51, 13, 40, 0, 42, 1, 2, 3, 2, 78, 29,
+ 10, 1, 8, 22, 106, 21, 27, 23, 9, 70, 60, 55, 23, 25, 23, 51, 17, 4, 8, 35, 3, 1, 9, 64, 1,
+ 4, 9, 2, 10, 1, 1, 1, 35, 18, 1, 34, 2, 1, 6, 1, 65, 7, 1, 1, 1, 4, 1, 15, 1, 10, 7, 57, 23,
+ 4, 1, 8, 2, 2, 2, 22, 1, 7, 1, 2, 1, 5, 3, 8, 2, 2, 2, 2, 3, 1, 6, 1, 5, 7, 156, 66, 1, 3,
+ 1, 4, 20, 3, 30, 66, 2, 2, 1, 1, 184, 54, 2, 7, 25, 6, 34, 63, 1, 1, 3, 1, 59, 54, 2, 1, 71,
+ 27, 2, 14, 213, 57, 103, 64, 31, 8, 2, 1, 2, 8, 1, 2, 1, 30, 1, 2, 2, 2, 2, 4, 93, 8, 2, 46,
+ 2, 6, 1, 1, 1, 2, 27, 51, 2, 10, 17, 72, 5, 1, 34, 57, 0, 9, 1, 45, 1, 7, 1, 1, 49, 30, 2,
+ 22, 1, 14, 73, 7, 1, 2, 1, 44, 3, 1, 1, 2, 1, 3, 1, 1, 2, 2, 24, 6, 1, 2, 1, 37, 1, 2, 1, 4,
+ 1, 1, 0, 23, 185, 1, 79, 0, 102, 111, 17, 196, 0, 0, 0, 0, 0, 0, 7, 31, 113, 30, 18, 48, 16,
+ 4, 31, 21, 5, 19, 0, 64, 128, 75, 4, 57, 7, 17, 64, 2, 1, 1, 12, 2, 14, 0, 8, 0, 42, 9, 0,
+ 0, 49, 3, 17, 4, 8, 0, 0, 107, 5, 13, 3, 9, 7, 10, 4, 1, 0, 85, 1, 71, 1, 2, 2, 1, 2, 2, 2,
+ 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3, 7, 1, 0, 2, 25,
+ 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 7, 1, 17, 2, 7, 1,
+ 2, 1, 5, 213, 45, 10, 7, 16, 1, 0, 44, 0, 197, 59, 68, 3, 1, 3, 1, 0, 4, 1, 27, 1, 2, 1, 1,
+ 2, 1, 1, 10, 1, 4, 1, 1, 1, 1, 6, 1, 4, 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 2, 1, 1, 2, 4, 1, 7, 1, 4, 1, 4, 1, 1, 1, 10, 1, 17, 5, 3, 1, 5, 1, 17, 0,
+ 26, 6, 26, 6, 26, 0, 0, 34, 0, 11, 222, 2, 0, 14, 0, 0, 0, 0, 0, 0,
+ ];
+ /// Returns `true` if `c` has the `Alphabetic` property, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod case_ignorable {
+ static SHORT_OFFSET_RUNS: [u32; 32] = [
+ 688, 44045149, 555751186, 559947709, 794831996, 866136069, 891330581, 916497656, 920692236,
+ 924908318, 1122041344, 1130430973, 1193347585, 1205931300, 1231097515, 1235294255,
+ 1445009723, 1453399088, 1512120051, 1575040048, 1579248368, 1583443791, 1596046493,
+ 1612829031, 1621219840, 1642192896, 1667359024, 1688330988, 1692526800, 1696723963,
+ 1705902081, 1711210992,
+ ];
+ static OFFSETS: [u8; 821] = [
+ 39, 1, 6, 1, 11, 1, 35, 1, 1, 1, 71, 1, 4, 1, 1, 1, 4, 1, 2, 2, 0, 192, 4, 2, 4, 1, 9, 2,
+ 1, 1, 251, 7, 207, 1, 5, 1, 49, 45, 1, 1, 1, 2, 1, 2, 1, 1, 44, 1, 11, 6, 10, 11, 1, 1, 35,
+ 1, 10, 21, 16, 1, 101, 8, 1, 10, 1, 4, 33, 1, 1, 1, 30, 27, 91, 11, 58, 11, 4, 1, 2, 1, 24,
+ 24, 43, 3, 119, 48, 55, 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 13, 1, 15, 1, 58, 1, 4, 4, 8, 1,
+ 20, 2, 26, 1, 2, 2, 57, 1, 4, 2, 4, 2, 2, 3, 3, 1, 30, 2, 3, 1, 11, 2, 57, 1, 4, 5, 1, 2, 4,
+ 1, 20, 2, 22, 6, 1, 1, 58, 1, 2, 1, 1, 4, 8, 1, 7, 2, 11, 2, 30, 1, 61, 1, 12, 1, 50, 1, 3,
+ 1, 57, 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 6, 1, 5, 2, 20, 2, 28, 2, 57, 2, 4,
+ 4, 8, 1, 20, 2, 29, 1, 72, 1, 7, 3, 1, 1, 90, 1, 2, 7, 11, 9, 98, 1, 2, 9, 9, 1, 1, 6, 74,
+ 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, 102, 4, 1, 6, 1, 2, 2, 2, 25,
+ 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 94, 1, 0, 3, 0, 3, 29, 3, 29, 2, 30, 2, 64, 2, 1,
+ 7, 8, 1, 2, 11, 3, 1, 5, 1, 45, 4, 52, 1, 65, 2, 34, 1, 118, 3, 4, 2, 9, 1, 6, 3, 219, 2, 2,
+ 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 39, 1, 8, 17, 63, 4, 48, 1, 1, 5, 1, 1, 5, 1,
+ 40, 9, 12, 2, 32, 4, 2, 2, 1, 3, 56, 1, 1, 2, 3, 1, 1, 3, 58, 8, 2, 2, 64, 6, 82, 3, 1, 13,
+ 1, 7, 4, 1, 6, 1, 3, 2, 50, 63, 13, 1, 34, 95, 1, 5, 0, 1, 1, 3, 11, 3, 13, 3, 13, 3, 13, 2,
+ 12, 5, 8, 2, 10, 1, 2, 1, 2, 5, 49, 5, 1, 10, 1, 1, 13, 1, 16, 13, 51, 33, 0, 2, 113, 3,
+ 125, 1, 15, 1, 96, 32, 47, 1, 0, 1, 36, 4, 3, 5, 5, 1, 93, 6, 93, 3, 0, 1, 0, 6, 0, 1, 98,
+ 4, 1, 10, 1, 1, 28, 4, 80, 2, 14, 34, 78, 1, 23, 3, 109, 2, 8, 1, 3, 1, 4, 1, 25, 2, 5, 1,
+ 151, 2, 26, 18, 13, 1, 38, 8, 25, 11, 46, 3, 48, 1, 2, 4, 2, 2, 17, 1, 21, 2, 66, 6, 2, 2,
+ 2, 2, 12, 1, 8, 1, 35, 1, 11, 1, 51, 1, 1, 3, 2, 2, 5, 2, 1, 1, 27, 1, 14, 2, 5, 2, 1, 1,
+ 100, 5, 9, 3, 121, 1, 2, 1, 4, 1, 0, 1, 147, 16, 0, 16, 3, 1, 12, 16, 34, 1, 2, 1, 169, 1,
+ 7, 1, 6, 1, 11, 1, 35, 1, 1, 1, 47, 1, 45, 2, 67, 1, 21, 3, 0, 1, 226, 1, 149, 5, 0, 3, 1,
+ 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 153, 11, 176, 1, 54, 15, 56, 3, 49, 4, 2, 2, 2, 1,
+ 15, 1, 50, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 160,
+ 1, 3, 8, 21, 2, 57, 2, 3, 1, 37, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 84, 6, 1, 1, 4, 2, 1,
+ 2, 238, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1, 1, 101, 3, 2, 4, 1,
+ 5, 0, 9, 1, 2, 0, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10, 40, 6, 2, 4, 8, 1, 9, 6, 2, 3,
+ 46, 13, 1, 2, 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2, 122, 6, 3, 1, 1, 2, 1, 7, 1, 1,
+ 72, 2, 3, 1, 1, 1, 0, 2, 0, 9, 0, 5, 59, 7, 9, 4, 0, 1, 63, 17, 64, 2, 1, 2, 0, 2, 1, 4, 0,
+ 3, 9, 16, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0, 7, 1, 17, 2, 7,
+ 1, 2, 1, 5, 0, 14, 0, 4, 0, 7, 109, 8, 0, 5, 0, 1, 30, 96, 128, 240, 0,
+ ];
+ /// Returns `true` if `c` has the `Case_Ignorable` property, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod cased {
+ static SHORT_OFFSET_RUNS: [u32; 19] = [
+ 4256, 115348384, 136322176, 144711446, 163587254, 320875520, 325101120, 358656816,
+ 392231680, 404815649, 413205504, 421596288, 434182304, 442592832, 446813184, 451008166,
+ 528607488, 576844080, 582152586,
+ ];
+ static OFFSETS: [u8; 283] = [
+ 65, 26, 6, 26, 47, 1, 10, 1, 4, 1, 5, 23, 1, 31, 1, 195, 1, 4, 4, 208, 1, 36, 7, 2, 30, 5,
+ 96, 1, 42, 4, 2, 2, 2, 4, 1, 1, 6, 1, 1, 3, 1, 1, 1, 20, 1, 83, 1, 139, 8, 166, 1, 38, 9,
+ 41, 0, 38, 1, 1, 5, 1, 2, 43, 2, 3, 0, 86, 2, 6, 0, 9, 7, 43, 2, 3, 64, 192, 64, 0, 2, 6, 2,
+ 38, 2, 6, 2, 8, 1, 1, 1, 1, 1, 1, 1, 31, 2, 53, 1, 7, 1, 1, 3, 3, 1, 7, 3, 4, 2, 6, 4, 13,
+ 5, 3, 1, 7, 116, 1, 13, 1, 16, 13, 101, 1, 4, 1, 2, 10, 1, 1, 3, 5, 6, 1, 1, 1, 1, 1, 1, 4,
+ 1, 6, 4, 1, 2, 4, 5, 5, 4, 1, 17, 32, 3, 2, 0, 52, 0, 47, 1, 47, 1, 133, 6, 4, 3, 2, 12, 38,
+ 1, 1, 5, 1, 0, 46, 18, 30, 132, 102, 3, 4, 1, 48, 2, 9, 42, 2, 1, 3, 0, 43, 1, 13, 7, 80, 0,
+ 7, 12, 5, 0, 26, 6, 26, 0, 80, 96, 36, 4, 36, 0, 51, 13, 51, 0, 64, 0, 64, 0, 85, 1, 71, 1,
+ 2, 2, 1, 2, 2, 2, 4, 1, 12, 1, 1, 1, 7, 1, 65, 1, 4, 2, 8, 1, 7, 1, 28, 1, 4, 1, 5, 1, 1, 3,
+ 7, 1, 0, 2, 25, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 31, 1, 25, 1, 8, 0, 68,
+ 0, 26, 6, 26, 6, 26, 0,
+ ];
+ /// Returns `true` if `c` has the `Cased` property, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod cc {
+ static SHORT_OFFSET_RUNS: [u32; 1] = [
+ 1114272,
+ ];
+ static OFFSETS: [u8; 5] = [
+ 0, 32, 95, 33, 0,
+ ];
+ /// Returns `true` if `c` is in the `Cc` (control) category, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod grapheme_extend {
+ static SHORT_OFFSET_RUNS: [u32; 31] = [
+ 768, 2098307, 6292881, 10490717, 513808146, 518004748, 723528943, 731918378, 744531567,
+ 752920578, 769719070, 899743232, 903937950, 912327165, 916523521, 929107236, 954273451,
+ 958470191, 1180769328, 1252073203, 1315007216, 1319202639, 1327611037, 1340199269,
+ 1344395776, 1373757440, 1398923568, 1419895532, 1424091344, 1429078048, 1438581232,
+ ];
+ static OFFSETS: [u8; 689] = [
+ 0, 112, 0, 7, 0, 45, 1, 1, 1, 2, 1, 2, 1, 1, 72, 11, 48, 21, 16, 1, 101, 7, 2, 6, 2, 2, 1,
+ 4, 35, 1, 30, 27, 91, 11, 58, 9, 9, 1, 24, 4, 1, 9, 1, 3, 1, 5, 43, 3, 119, 15, 1, 32, 55,
+ 1, 1, 1, 4, 8, 4, 1, 3, 7, 10, 2, 29, 1, 58, 1, 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 26, 1, 2, 2,
+ 57, 1, 4, 2, 4, 2, 2, 3, 3, 1, 30, 2, 3, 1, 11, 2, 57, 1, 4, 5, 1, 2, 4, 1, 20, 2, 22, 6, 1,
+ 1, 58, 1, 1, 2, 1, 4, 8, 1, 7, 3, 10, 2, 30, 1, 59, 1, 1, 1, 12, 1, 9, 1, 40, 1, 3, 1, 57,
+ 3, 5, 3, 1, 4, 7, 2, 11, 2, 29, 1, 58, 1, 2, 1, 2, 1, 3, 1, 5, 2, 7, 2, 11, 2, 28, 2, 57, 2,
+ 1, 1, 2, 4, 8, 1, 9, 1, 10, 2, 29, 1, 72, 1, 4, 1, 2, 3, 1, 1, 8, 1, 81, 1, 2, 7, 12, 8, 98,
+ 1, 2, 9, 11, 6, 74, 2, 27, 1, 1, 1, 1, 1, 55, 14, 1, 5, 1, 2, 5, 11, 1, 36, 9, 1, 102, 4, 1,
+ 6, 1, 2, 2, 2, 25, 2, 4, 3, 16, 4, 13, 1, 2, 2, 6, 1, 15, 1, 0, 3, 0, 3, 29, 3, 29, 2, 30,
+ 2, 64, 2, 1, 7, 8, 1, 2, 11, 9, 1, 45, 3, 119, 2, 34, 1, 118, 3, 4, 2, 9, 1, 6, 3, 219, 2,
+ 2, 1, 58, 1, 1, 7, 1, 1, 1, 1, 2, 8, 6, 10, 2, 1, 48, 17, 63, 4, 48, 7, 1, 1, 5, 1, 40, 9,
+ 12, 2, 32, 4, 2, 2, 1, 3, 56, 1, 1, 2, 3, 1, 1, 3, 58, 8, 2, 2, 152, 3, 1, 13, 1, 7, 4, 1,
+ 6, 1, 3, 2, 198, 58, 1, 5, 0, 1, 195, 33, 0, 3, 141, 1, 96, 32, 0, 6, 105, 2, 0, 4, 1, 10,
+ 32, 2, 80, 2, 0, 1, 3, 1, 4, 1, 25, 2, 5, 1, 151, 2, 26, 18, 13, 1, 38, 8, 25, 11, 46, 3,
+ 48, 1, 2, 4, 2, 2, 39, 1, 67, 6, 2, 2, 2, 2, 12, 1, 8, 1, 47, 1, 51, 1, 1, 3, 2, 2, 5, 2, 1,
+ 1, 42, 2, 8, 1, 238, 1, 2, 1, 4, 1, 0, 1, 0, 16, 16, 16, 0, 2, 0, 1, 226, 1, 149, 5, 0, 3,
+ 1, 2, 5, 4, 40, 3, 4, 1, 165, 2, 0, 4, 0, 2, 153, 11, 176, 1, 54, 15, 56, 3, 49, 4, 2, 2,
+ 69, 3, 36, 5, 1, 8, 62, 1, 12, 2, 52, 9, 10, 4, 2, 1, 95, 3, 2, 1, 1, 2, 6, 1, 160, 1, 3, 8,
+ 21, 2, 57, 2, 1, 1, 1, 1, 22, 1, 14, 7, 3, 5, 195, 8, 2, 3, 1, 1, 23, 1, 81, 1, 2, 6, 1, 1,
+ 2, 1, 1, 2, 1, 2, 235, 1, 2, 4, 6, 2, 1, 2, 27, 2, 85, 8, 2, 1, 1, 2, 106, 1, 1, 1, 2, 6, 1,
+ 1, 101, 3, 2, 4, 1, 5, 0, 9, 1, 2, 245, 1, 10, 2, 1, 1, 4, 1, 144, 4, 2, 2, 4, 1, 32, 10,
+ 40, 6, 2, 4, 8, 1, 9, 6, 2, 3, 46, 13, 1, 2, 0, 7, 1, 6, 1, 1, 82, 22, 2, 7, 1, 2, 1, 2,
+ 122, 6, 3, 1, 1, 2, 1, 7, 1, 1, 72, 2, 3, 1, 1, 1, 0, 2, 0, 5, 59, 7, 0, 1, 63, 4, 81, 1, 0,
+ 2, 0, 1, 1, 3, 4, 5, 8, 8, 2, 7, 30, 4, 148, 3, 0, 55, 4, 50, 8, 1, 14, 1, 22, 5, 1, 15, 0,
+ 7, 1, 17, 2, 7, 1, 2, 1, 5, 0, 7, 0, 4, 0, 7, 109, 7, 0, 96, 128, 240, 0,
+ ];
+ /// Returns `true` if `c` has the `Grapheme_Extend` property, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod lowercase {
+ static BITSET_CHUNKS_MAP: [u8; 123] = [
+ 13, 16, 0, 0, 8, 0, 0, 11, 12, 9, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 3, 1, 0, 14, 0, 7, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0,
+ 0, 0, 6,
+ ];
+ static BITSET_INDEX_CHUNKS: [[u8; 16]; 18] = [
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 14, 52, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 62, 39, 0, 47, 43, 45, 30],
+ [0, 0, 0, 0, 10, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26],
+ [0, 0, 0, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 54, 0, 52, 52, 52, 0, 21, 21, 64, 21, 33, 24, 23, 34],
+ [0, 5, 71, 0, 28, 15, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 61, 31, 17, 22, 48, 49, 44, 42, 8, 32, 38, 0, 27, 13, 29],
+ [11, 55, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [16, 25, 21, 35, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [16, 46, 2, 20, 63, 9, 54, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [60, 37, 51, 12, 70, 58, 18, 1, 6, 59, 68, 19, 65, 66, 3, 41],
+ ];
+ static BITSET_CANONICAL: [u64; 52] = [
+ 0b0000000000000000000000000000000000000000000000000000000000000000,
+ 0b1111111111111111110000000000000000000000000011111111111111111111,
+ 0b1010101010101010101010101010101010101010101010101010100000000010,
+ 0b1111111111111111111111000000000000000000000000001111110111111111,
+ 0b0000111111111111111111111111111111111111000000000000000000000000,
+ 0b1000000000000010000000000000000000000000000000000000000000000000,
+ 0b0000111111111111111111111111110000000000000000000000000011111111,
+ 0b0000000000000111111111111111111111111111111111111111111111111111,
+ 0b1111111111111111111111111111111111111111111111111010101010000101,
+ 0b1111111111111111111111111111111100000000000000000000000000000000,
+ 0b1111111111111111111111111111110000000000000000000000000000000000,
+ 0b1111111111111111111111110000000000000000000000000000000000000000,
+ 0b1111111111111111111111000000000000000000000000001111111111101111,
+ 0b1111111111111111111100000000000000000000000000010000000000000000,
+ 0b1111111111111111000000011111111111110111111111111111111111111111,
+ 0b1111111111111111000000000000000000000000000000000100001111000000,
+ 0b1111111111111111000000000000000000000000000000000000000000000000,
+ 0b1111111101111111111111111111111110000000000000000000000000000000,
+ 0b1111110000000000000000000000000011111111111111111111111111000000,
+ 0b1111000000000000000000000000001111110111111111111111111111111100,
+ 0b1010101010101010101010101010101010101010101010101101010101010100,
+ 0b1010101010101010101010101010101010101010101010101010101010101010,
+ 0b0101010110101010101010101010101010101010101010101010101010101010,
+ 0b0100000011011111000000001111111100000000111111110000000011111111,
+ 0b0011111111111111000000001111111100000000111111110000000000111111,
+ 0b0011111111011010000101010110001001111111111111111111111111111111,
+ 0b0011111100000000000000000000000000000000000000000000000000000000,
+ 0b0011110010001010000000000000000000000000000000000000000000100000,
+ 0b0011001000010000100000000000000000000000000010001100010000000000,
+ 0b0001100100101111101010101010101010101010111000110111111111111111,
+ 0b0000011101000000000000000000000000000000000000000000010100001000,
+ 0b0000010000100000000001000000000000000000000000000000000000000000,
+ 0b0000000111111111111111111111111111111111111011111111111111111111,
+ 0b0000000011111111000000001111111100000000001111110000000011111111,
+ 0b0000000011011100000000001111111100000000110011110000000011011100,
+ 0b0000000000001000010100000001101010101010101010101010101010101010,
+ 0b0000000000000000001000001011111111111111111111111111111111111111,
+ 0b0000000000000000000000001111111111111111110111111100000000000000,
+ 0b0000000000000000000000000001111100000000000000000000000000000011,
+ 0b0000000000000000000000000000000000111010101010101010101010101010,
+ 0b0000000000000000000000000000000000000000111110000000000001111111,
+ 0b0000000000000000000000000000000000000000000000000000101111110111,
+ 0b1001001111111010101010101010101010101010101010101010101010101010,
+ 0b1001010111111111101010101010101010101010101010101010101010101010,
+ 0b1010101000101001101010101010101010110101010101010101001001000000,
+ 0b1010101010100000100000101010101010101010101110100101000010101010,
+ 0b1010101010101010101010101010101011111111111111111111111111111111,
+ 0b1010101010101011101010101010100000000000000000000000000000000000,
+ 0b1101010010101010101010101010101010101010101010101010101101010101,
+ 0b1110011001010001001011010010101001001110001001000011000100101001,
+ 0b1110011111111111111111111111111111111111111111110000000000000000,
+ 0b1110101111000000000000000000000000001111111111111111111111111100,
+ ];
+ static BITSET_MAPPING: [(u8, u8); 20] = [
+ (0, 64), (1, 188), (1, 183), (1, 176), (1, 109), (1, 124), (1, 126), (1, 66), (1, 70),
+ (1, 77), (2, 146), (2, 144), (2, 83), (3, 12), (3, 6), (4, 156), (4, 78), (5, 187),
+ (6, 132), (7, 93),
+ ];
+
+ /// Returns `true` if `c` has the `Lowercase` property, per the
+ /// compressed bitset tables above.
+ pub fn lookup(c: char) -> bool {
+ super::bitset_search(
+ c as u32,
+ &BITSET_CHUNKS_MAP,
+ &BITSET_INDEX_CHUNKS,
+ &BITSET_CANONICAL,
+ &BITSET_MAPPING,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod n {
+ static SHORT_OFFSET_RUNS: [u32; 38] = [
+ 1632, 18876774, 31461440, 102765417, 111154926, 115349830, 132128880, 165684320, 186656630,
+ 195046653, 199241735, 203436434, 216049184, 241215536, 249605104, 274792208, 278987015,
+ 283181793, 295766104, 320933114, 383848032, 392238160, 434181712, 442570976, 455154768,
+ 463544256, 476128256, 480340576, 484535936, 497144544, 501340110, 509731136, 513925872,
+ 518121671, 522316913, 530706688, 551681008, 556989434,
+ ];
+ static OFFSETS: [u8; 267] = [
+ 48, 10, 120, 2, 5, 1, 2, 3, 0, 10, 134, 10, 198, 10, 0, 10, 118, 10, 4, 6, 108, 10, 118,
+ 10, 118, 10, 2, 6, 110, 13, 115, 10, 8, 7, 103, 10, 104, 7, 7, 19, 109, 10, 96, 10, 118, 10,
+ 70, 20, 0, 10, 70, 10, 0, 20, 0, 3, 239, 10, 6, 10, 22, 10, 0, 10, 128, 11, 165, 10, 6, 10,
+ 182, 10, 86, 10, 134, 10, 6, 10, 0, 1, 3, 6, 6, 10, 198, 51, 2, 5, 0, 60, 78, 22, 0, 30, 0,
+ 1, 0, 1, 25, 9, 14, 3, 0, 4, 138, 10, 30, 8, 1, 15, 32, 10, 39, 15, 0, 10, 188, 10, 0, 6,
+ 154, 10, 38, 10, 198, 10, 22, 10, 86, 10, 0, 10, 0, 10, 0, 45, 12, 57, 17, 2, 0, 27, 36, 4,
+ 29, 1, 8, 1, 134, 5, 202, 10, 0, 8, 25, 7, 39, 9, 75, 5, 22, 6, 160, 2, 2, 16, 2, 46, 64, 9,
+ 52, 2, 30, 3, 75, 5, 104, 8, 24, 8, 41, 7, 0, 6, 48, 10, 0, 31, 158, 10, 42, 4, 112, 7, 134,
+ 30, 128, 10, 60, 10, 144, 10, 7, 20, 251, 10, 0, 10, 118, 10, 0, 10, 102, 10, 102, 12, 0,
+ 19, 93, 10, 0, 29, 227, 10, 70, 10, 0, 21, 0, 111, 0, 10, 230, 10, 1, 7, 0, 23, 0, 20, 108,
+ 25, 0, 50, 0, 10, 0, 10, 0, 9, 128, 10, 0, 59, 1, 3, 1, 4, 76, 45, 1, 15, 0, 13, 0, 10, 0,
+ ];
+ /// Returns `true` if `c` is in this module's `n` set (presumably the
+ /// numeric `N` category, per the module name), using the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod uppercase {
+ static BITSET_CHUNKS_MAP: [u8; 125] = [
+ 12, 15, 5, 5, 0, 5, 5, 2, 4, 11, 5, 14, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 8, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 6, 5, 13, 5, 10, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 7, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 16, 5, 5,
+ 5, 5, 9, 5, 3,
+ ];
+ static BITSET_INDEX_CHUNKS: [[u8; 16]; 17] = [
+ [41, 41, 5, 33, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 5, 0],
+ [41, 41, 5, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 38, 41, 41, 41, 41, 41, 17, 17, 61, 17, 40, 29, 24, 23],
+ [41, 41, 41, 41, 9, 8, 42, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 41, 41, 35, 28, 65, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 56, 41, 41, 41],
+ [41, 41, 41, 41, 41, 41, 41, 41, 41, 46, 41, 41, 41, 41, 41, 41],
+ [41, 41, 41, 41, 41, 41, 41, 41, 41, 60, 59, 41, 20, 14, 16, 4],
+ [41, 41, 41, 41, 47, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 51, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 41, 52, 43, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [41, 53, 41, 31, 34, 21, 22, 15, 13, 32, 41, 41, 41, 11, 30, 37],
+ [48, 41, 9, 44, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [49, 36, 17, 27, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [50, 19, 2, 18, 10, 45, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41],
+ [57, 1, 26, 54, 12, 7, 25, 55, 39, 58, 6, 3, 64, 63, 62, 66],
+ ];
+ static BITSET_CANONICAL: [u64; 41] = [
+ 0b0000000000111111111111111111111111111111111111111111111111111111,
+ 0b1111111111111111111111110000000000000000000000000011111111111111,
+ 0b0101010101010101010101010101010101010101010101010101010000000001,
+ 0b0000011111111111111111111111110000000000000000000000000000000001,
+ 0b0000000000100000000000000000000000000000000000000000001011110100,
+ 0b1111111111111111111111111111111100000000000000000000000000000000,
+ 0b1111111111111111111111110000000000000000000000000000001111111111,
+ 0b1111111111111111111100000000000000000000000000011111110001011111,
+ 0b1111111111111111000000111111111111111111111111110000001111111111,
+ 0b1111111111111111000000000000000000000000000000000000000000000000,
+ 0b1111111111111110010101010101010101010101010101010101010101010101,
+ 0b1000000001000101000000000000000000000000000000000000000000000000,
+ 0b0111101100000000000000000000000000011111110111111110011110110000,
+ 0b0110110000000101010101010101010101010101010101010101010101010101,
+ 0b0110101000000000010101010101010101010101010101010101010101010101,
+ 0b0101010111010010010101010101010101001010101010101010010010010000,
+ 0b0101010101011111011111010101010101010101010001010010100001010101,
+ 0b0101010101010101010101010101010101010101010101010101010101010101,
+ 0b0101010101010101010101010101010101010101010101010010101010101011,
+ 0b0101010101010101010101010101010100000000000000000000000000000000,
+ 0b0101010101010100010101010101010000000000000000000000000000000000,
+ 0b0010101101010101010101010101010101010101010101010101010010101010,
+ 0b0001000110101110110100101101010110110001110110111100111011010110,
+ 0b0000111100000000000111110000000000001111000000000000111100000000,
+ 0b0000111100000000000000000000000000000000000000000000000000000000,
+ 0b0000001111111111111111111111111100000000000000000000000000111111,
+ 0b0000000000111111110111100110010011010000000000000000000000000011,
+ 0b0000000000000100001010000000010101010101010101010101010101010101,
+ 0b0000000000000000111111111111111100000000000000000000000000100000,
+ 0b0000000000000000111111110000000010101010000000000011111100000000,
+ 0b0000000000000000000011111111101111111111111111101101011101000000,
+ 0b0000000000000000000000000000000001111111011111111111111111111111,
+ 0b0000000000000000000000000000000000000000000000000101010101111010,
+ 0b0000000000000000000000000000000000000000000000000010000010111111,
+ 0b1010101001010101010101010101010101010101010101010101010101010101,
+ 0b1100000000001111001111010101000000111110001001110011100010000100,
+ 0b1100000000100101111010101001110100000000000000000000000000000000,
+ 0b1110011010010000010101010101010101010101000111001000000000000000,
+ 0b1110011111111111111111111111111111111111111111110000000000000000,
+ 0b1111000000000000000000000000001111111111111111111111111100000000,
+ 0b1111111100000000111111110000000000111111000000001111111100000000,
+ ];
+ static BITSET_MAPPING: [(u8, u8); 26] = [
+ (0, 182), (0, 74), (0, 166), (0, 162), (0, 159), (0, 150), (0, 148), (0, 142), (0, 135),
+ (0, 134), (0, 131), (0, 64), (1, 115), (1, 66), (1, 70), (1, 83), (1, 12), (1, 8), (2, 164),
+ (2, 146), (2, 20), (3, 146), (3, 140), (3, 134), (4, 178), (4, 171),
+ ];
+
+ /// Returns `true` if `c` has the `Uppercase` property, per the
+ /// compressed bitset tables above.
+ pub fn lookup(c: char) -> bool {
+ super::bitset_search(
+ c as u32,
+ &BITSET_CHUNKS_MAP,
+ &BITSET_INDEX_CHUNKS,
+ &BITSET_CANONICAL,
+ &BITSET_MAPPING,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod white_space {
+ static SHORT_OFFSET_RUNS: [u32; 4] = [
+ 5760, 18882560, 23080960, 40972289,
+ ];
+ static OFFSETS: [u8; 21] = [
+ 9, 5, 18, 1, 100, 1, 26, 1, 0, 1, 0, 11, 29, 2, 5, 1, 47, 1, 0, 1, 0,
+ ];
+ /// Returns `true` if `c` has the `White_Space` property, per the
+ /// run-length-encoded tables above.
+ pub fn lookup(c: char) -> bool {
+ super::skip_search(
+ c as u32,
+ &SHORT_OFFSET_RUNS,
+ &OFFSETS,
+ )
+ }
+}
+
+#[rustfmt::skip]
+pub mod conversions {
+ pub fn to_lower(c: char) -> [char; 3] {
+ match bsearch_case_table(c, LOWERCASE_TABLE) {
+ None => [c, '\0', '\0'],
+ Some(index) => LOWERCASE_TABLE[index].1,
+ }
+ }
+
+ pub fn to_upper(c: char) -> [char; 3] {
+ match bsearch_case_table(c, UPPERCASE_TABLE) {
+ None => [c, '\0', '\0'],
+ Some(index) => UPPERCASE_TABLE[index].1,
+ }
+ }
+
+ fn bsearch_case_table(c: char, table: &[(char, [char; 3])]) -> Option<usize> {
+ table.binary_search_by(|&(key, _)| key.cmp(&c)).ok()
+ }
+ static LOWERCASE_TABLE: &[(char, [char; 3])] = &[
+ ('A', ['a', '\u{0}', '\u{0}']), ('B', ['b', '\u{0}', '\u{0}']),
+ ('C', ['c', '\u{0}', '\u{0}']), ('D', ['d', '\u{0}', '\u{0}']),
+ ('E', ['e', '\u{0}', '\u{0}']), ('F', ['f', '\u{0}', '\u{0}']),
+ ('G', ['g', '\u{0}', '\u{0}']), ('H', ['h', '\u{0}', '\u{0}']),
+ ('I', ['i', '\u{0}', '\u{0}']), ('J', ['j', '\u{0}', '\u{0}']),
+ ('K', ['k', '\u{0}', '\u{0}']), ('L', ['l', '\u{0}', '\u{0}']),
+ ('M', ['m', '\u{0}', '\u{0}']), ('N', ['n', '\u{0}', '\u{0}']),
+ ('O', ['o', '\u{0}', '\u{0}']), ('P', ['p', '\u{0}', '\u{0}']),
+ ('Q', ['q', '\u{0}', '\u{0}']), ('R', ['r', '\u{0}', '\u{0}']),
+ ('S', ['s', '\u{0}', '\u{0}']), ('T', ['t', '\u{0}', '\u{0}']),
+ ('U', ['u', '\u{0}', '\u{0}']), ('V', ['v', '\u{0}', '\u{0}']),
+ ('W', ['w', '\u{0}', '\u{0}']), ('X', ['x', '\u{0}', '\u{0}']),
+ ('Y', ['y', '\u{0}', '\u{0}']), ('Z', ['z', '\u{0}', '\u{0}']),
+ ('\u{c0}', ['\u{e0}', '\u{0}', '\u{0}']), ('\u{c1}', ['\u{e1}', '\u{0}', '\u{0}']),
+ ('\u{c2}', ['\u{e2}', '\u{0}', '\u{0}']), ('\u{c3}', ['\u{e3}', '\u{0}', '\u{0}']),
+ ('\u{c4}', ['\u{e4}', '\u{0}', '\u{0}']), ('\u{c5}', ['\u{e5}', '\u{0}', '\u{0}']),
+ ('\u{c6}', ['\u{e6}', '\u{0}', '\u{0}']), ('\u{c7}', ['\u{e7}', '\u{0}', '\u{0}']),
+ ('\u{c8}', ['\u{e8}', '\u{0}', '\u{0}']), ('\u{c9}', ['\u{e9}', '\u{0}', '\u{0}']),
+ ('\u{ca}', ['\u{ea}', '\u{0}', '\u{0}']), ('\u{cb}', ['\u{eb}', '\u{0}', '\u{0}']),
+ ('\u{cc}', ['\u{ec}', '\u{0}', '\u{0}']), ('\u{cd}', ['\u{ed}', '\u{0}', '\u{0}']),
+ ('\u{ce}', ['\u{ee}', '\u{0}', '\u{0}']), ('\u{cf}', ['\u{ef}', '\u{0}', '\u{0}']),
+ ('\u{d0}', ['\u{f0}', '\u{0}', '\u{0}']), ('\u{d1}', ['\u{f1}', '\u{0}', '\u{0}']),
+ ('\u{d2}', ['\u{f2}', '\u{0}', '\u{0}']), ('\u{d3}', ['\u{f3}', '\u{0}', '\u{0}']),
+ ('\u{d4}', ['\u{f4}', '\u{0}', '\u{0}']), ('\u{d5}', ['\u{f5}', '\u{0}', '\u{0}']),
+ ('\u{d6}', ['\u{f6}', '\u{0}', '\u{0}']), ('\u{d8}', ['\u{f8}', '\u{0}', '\u{0}']),
+ ('\u{d9}', ['\u{f9}', '\u{0}', '\u{0}']), ('\u{da}', ['\u{fa}', '\u{0}', '\u{0}']),
+ ('\u{db}', ['\u{fb}', '\u{0}', '\u{0}']), ('\u{dc}', ['\u{fc}', '\u{0}', '\u{0}']),
+ ('\u{dd}', ['\u{fd}', '\u{0}', '\u{0}']), ('\u{de}', ['\u{fe}', '\u{0}', '\u{0}']),
+ ('\u{100}', ['\u{101}', '\u{0}', '\u{0}']), ('\u{102}', ['\u{103}', '\u{0}', '\u{0}']),
+ ('\u{104}', ['\u{105}', '\u{0}', '\u{0}']), ('\u{106}', ['\u{107}', '\u{0}', '\u{0}']),
+ ('\u{108}', ['\u{109}', '\u{0}', '\u{0}']), ('\u{10a}', ['\u{10b}', '\u{0}', '\u{0}']),
+ ('\u{10c}', ['\u{10d}', '\u{0}', '\u{0}']), ('\u{10e}', ['\u{10f}', '\u{0}', '\u{0}']),
+ ('\u{110}', ['\u{111}', '\u{0}', '\u{0}']), ('\u{112}', ['\u{113}', '\u{0}', '\u{0}']),
+ ('\u{114}', ['\u{115}', '\u{0}', '\u{0}']), ('\u{116}', ['\u{117}', '\u{0}', '\u{0}']),
+ ('\u{118}', ['\u{119}', '\u{0}', '\u{0}']), ('\u{11a}', ['\u{11b}', '\u{0}', '\u{0}']),
+ ('\u{11c}', ['\u{11d}', '\u{0}', '\u{0}']), ('\u{11e}', ['\u{11f}', '\u{0}', '\u{0}']),
+ ('\u{120}', ['\u{121}', '\u{0}', '\u{0}']), ('\u{122}', ['\u{123}', '\u{0}', '\u{0}']),
+ ('\u{124}', ['\u{125}', '\u{0}', '\u{0}']), ('\u{126}', ['\u{127}', '\u{0}', '\u{0}']),
+ ('\u{128}', ['\u{129}', '\u{0}', '\u{0}']), ('\u{12a}', ['\u{12b}', '\u{0}', '\u{0}']),
+ ('\u{12c}', ['\u{12d}', '\u{0}', '\u{0}']), ('\u{12e}', ['\u{12f}', '\u{0}', '\u{0}']),
+ ('\u{130}', ['i', '\u{307}', '\u{0}']), ('\u{132}', ['\u{133}', '\u{0}', '\u{0}']),
+ ('\u{134}', ['\u{135}', '\u{0}', '\u{0}']), ('\u{136}', ['\u{137}', '\u{0}', '\u{0}']),
+ ('\u{139}', ['\u{13a}', '\u{0}', '\u{0}']), ('\u{13b}', ['\u{13c}', '\u{0}', '\u{0}']),
+ ('\u{13d}', ['\u{13e}', '\u{0}', '\u{0}']), ('\u{13f}', ['\u{140}', '\u{0}', '\u{0}']),
+ ('\u{141}', ['\u{142}', '\u{0}', '\u{0}']), ('\u{143}', ['\u{144}', '\u{0}', '\u{0}']),
+ ('\u{145}', ['\u{146}', '\u{0}', '\u{0}']), ('\u{147}', ['\u{148}', '\u{0}', '\u{0}']),
+ ('\u{14a}', ['\u{14b}', '\u{0}', '\u{0}']), ('\u{14c}', ['\u{14d}', '\u{0}', '\u{0}']),
+ ('\u{14e}', ['\u{14f}', '\u{0}', '\u{0}']), ('\u{150}', ['\u{151}', '\u{0}', '\u{0}']),
+ ('\u{152}', ['\u{153}', '\u{0}', '\u{0}']), ('\u{154}', ['\u{155}', '\u{0}', '\u{0}']),
+ ('\u{156}', ['\u{157}', '\u{0}', '\u{0}']), ('\u{158}', ['\u{159}', '\u{0}', '\u{0}']),
+ ('\u{15a}', ['\u{15b}', '\u{0}', '\u{0}']), ('\u{15c}', ['\u{15d}', '\u{0}', '\u{0}']),
+ ('\u{15e}', ['\u{15f}', '\u{0}', '\u{0}']), ('\u{160}', ['\u{161}', '\u{0}', '\u{0}']),
+ ('\u{162}', ['\u{163}', '\u{0}', '\u{0}']), ('\u{164}', ['\u{165}', '\u{0}', '\u{0}']),
+ ('\u{166}', ['\u{167}', '\u{0}', '\u{0}']), ('\u{168}', ['\u{169}', '\u{0}', '\u{0}']),
+ ('\u{16a}', ['\u{16b}', '\u{0}', '\u{0}']), ('\u{16c}', ['\u{16d}', '\u{0}', '\u{0}']),
+ ('\u{16e}', ['\u{16f}', '\u{0}', '\u{0}']), ('\u{170}', ['\u{171}', '\u{0}', '\u{0}']),
+ ('\u{172}', ['\u{173}', '\u{0}', '\u{0}']), ('\u{174}', ['\u{175}', '\u{0}', '\u{0}']),
+ ('\u{176}', ['\u{177}', '\u{0}', '\u{0}']), ('\u{178}', ['\u{ff}', '\u{0}', '\u{0}']),
+ ('\u{179}', ['\u{17a}', '\u{0}', '\u{0}']), ('\u{17b}', ['\u{17c}', '\u{0}', '\u{0}']),
+ ('\u{17d}', ['\u{17e}', '\u{0}', '\u{0}']), ('\u{181}', ['\u{253}', '\u{0}', '\u{0}']),
+ ('\u{182}', ['\u{183}', '\u{0}', '\u{0}']), ('\u{184}', ['\u{185}', '\u{0}', '\u{0}']),
+ ('\u{186}', ['\u{254}', '\u{0}', '\u{0}']), ('\u{187}', ['\u{188}', '\u{0}', '\u{0}']),
+ ('\u{189}', ['\u{256}', '\u{0}', '\u{0}']), ('\u{18a}', ['\u{257}', '\u{0}', '\u{0}']),
+ ('\u{18b}', ['\u{18c}', '\u{0}', '\u{0}']), ('\u{18e}', ['\u{1dd}', '\u{0}', '\u{0}']),
+ ('\u{18f}', ['\u{259}', '\u{0}', '\u{0}']), ('\u{190}', ['\u{25b}', '\u{0}', '\u{0}']),
+ ('\u{191}', ['\u{192}', '\u{0}', '\u{0}']), ('\u{193}', ['\u{260}', '\u{0}', '\u{0}']),
+ ('\u{194}', ['\u{263}', '\u{0}', '\u{0}']), ('\u{196}', ['\u{269}', '\u{0}', '\u{0}']),
+ ('\u{197}', ['\u{268}', '\u{0}', '\u{0}']), ('\u{198}', ['\u{199}', '\u{0}', '\u{0}']),
+ ('\u{19c}', ['\u{26f}', '\u{0}', '\u{0}']), ('\u{19d}', ['\u{272}', '\u{0}', '\u{0}']),
+ ('\u{19f}', ['\u{275}', '\u{0}', '\u{0}']), ('\u{1a0}', ['\u{1a1}', '\u{0}', '\u{0}']),
+ ('\u{1a2}', ['\u{1a3}', '\u{0}', '\u{0}']), ('\u{1a4}', ['\u{1a5}', '\u{0}', '\u{0}']),
+ ('\u{1a6}', ['\u{280}', '\u{0}', '\u{0}']), ('\u{1a7}', ['\u{1a8}', '\u{0}', '\u{0}']),
+ ('\u{1a9}', ['\u{283}', '\u{0}', '\u{0}']), ('\u{1ac}', ['\u{1ad}', '\u{0}', '\u{0}']),
+ ('\u{1ae}', ['\u{288}', '\u{0}', '\u{0}']), ('\u{1af}', ['\u{1b0}', '\u{0}', '\u{0}']),
+ ('\u{1b1}', ['\u{28a}', '\u{0}', '\u{0}']), ('\u{1b2}', ['\u{28b}', '\u{0}', '\u{0}']),
+ ('\u{1b3}', ['\u{1b4}', '\u{0}', '\u{0}']), ('\u{1b5}', ['\u{1b6}', '\u{0}', '\u{0}']),
+ ('\u{1b7}', ['\u{292}', '\u{0}', '\u{0}']), ('\u{1b8}', ['\u{1b9}', '\u{0}', '\u{0}']),
+ ('\u{1bc}', ['\u{1bd}', '\u{0}', '\u{0}']), ('\u{1c4}', ['\u{1c6}', '\u{0}', '\u{0}']),
+ ('\u{1c5}', ['\u{1c6}', '\u{0}', '\u{0}']), ('\u{1c7}', ['\u{1c9}', '\u{0}', '\u{0}']),
+ ('\u{1c8}', ['\u{1c9}', '\u{0}', '\u{0}']), ('\u{1ca}', ['\u{1cc}', '\u{0}', '\u{0}']),
+ ('\u{1cb}', ['\u{1cc}', '\u{0}', '\u{0}']), ('\u{1cd}', ['\u{1ce}', '\u{0}', '\u{0}']),
+ ('\u{1cf}', ['\u{1d0}', '\u{0}', '\u{0}']), ('\u{1d1}', ['\u{1d2}', '\u{0}', '\u{0}']),
+ ('\u{1d3}', ['\u{1d4}', '\u{0}', '\u{0}']), ('\u{1d5}', ['\u{1d6}', '\u{0}', '\u{0}']),
+ ('\u{1d7}', ['\u{1d8}', '\u{0}', '\u{0}']), ('\u{1d9}', ['\u{1da}', '\u{0}', '\u{0}']),
+ ('\u{1db}', ['\u{1dc}', '\u{0}', '\u{0}']), ('\u{1de}', ['\u{1df}', '\u{0}', '\u{0}']),
+ ('\u{1e0}', ['\u{1e1}', '\u{0}', '\u{0}']), ('\u{1e2}', ['\u{1e3}', '\u{0}', '\u{0}']),
+ ('\u{1e4}', ['\u{1e5}', '\u{0}', '\u{0}']), ('\u{1e6}', ['\u{1e7}', '\u{0}', '\u{0}']),
+ ('\u{1e8}', ['\u{1e9}', '\u{0}', '\u{0}']), ('\u{1ea}', ['\u{1eb}', '\u{0}', '\u{0}']),
+ ('\u{1ec}', ['\u{1ed}', '\u{0}', '\u{0}']), ('\u{1ee}', ['\u{1ef}', '\u{0}', '\u{0}']),
+ ('\u{1f1}', ['\u{1f3}', '\u{0}', '\u{0}']), ('\u{1f2}', ['\u{1f3}', '\u{0}', '\u{0}']),
+ ('\u{1f4}', ['\u{1f5}', '\u{0}', '\u{0}']), ('\u{1f6}', ['\u{195}', '\u{0}', '\u{0}']),
+ ('\u{1f7}', ['\u{1bf}', '\u{0}', '\u{0}']), ('\u{1f8}', ['\u{1f9}', '\u{0}', '\u{0}']),
+ ('\u{1fa}', ['\u{1fb}', '\u{0}', '\u{0}']), ('\u{1fc}', ['\u{1fd}', '\u{0}', '\u{0}']),
+ ('\u{1fe}', ['\u{1ff}', '\u{0}', '\u{0}']), ('\u{200}', ['\u{201}', '\u{0}', '\u{0}']),
+ ('\u{202}', ['\u{203}', '\u{0}', '\u{0}']), ('\u{204}', ['\u{205}', '\u{0}', '\u{0}']),
+ ('\u{206}', ['\u{207}', '\u{0}', '\u{0}']), ('\u{208}', ['\u{209}', '\u{0}', '\u{0}']),
+ ('\u{20a}', ['\u{20b}', '\u{0}', '\u{0}']), ('\u{20c}', ['\u{20d}', '\u{0}', '\u{0}']),
+ ('\u{20e}', ['\u{20f}', '\u{0}', '\u{0}']), ('\u{210}', ['\u{211}', '\u{0}', '\u{0}']),
+ ('\u{212}', ['\u{213}', '\u{0}', '\u{0}']), ('\u{214}', ['\u{215}', '\u{0}', '\u{0}']),
+ ('\u{216}', ['\u{217}', '\u{0}', '\u{0}']), ('\u{218}', ['\u{219}', '\u{0}', '\u{0}']),
+ ('\u{21a}', ['\u{21b}', '\u{0}', '\u{0}']), ('\u{21c}', ['\u{21d}', '\u{0}', '\u{0}']),
+ ('\u{21e}', ['\u{21f}', '\u{0}', '\u{0}']), ('\u{220}', ['\u{19e}', '\u{0}', '\u{0}']),
+ ('\u{222}', ['\u{223}', '\u{0}', '\u{0}']), ('\u{224}', ['\u{225}', '\u{0}', '\u{0}']),
+ ('\u{226}', ['\u{227}', '\u{0}', '\u{0}']), ('\u{228}', ['\u{229}', '\u{0}', '\u{0}']),
+ ('\u{22a}', ['\u{22b}', '\u{0}', '\u{0}']), ('\u{22c}', ['\u{22d}', '\u{0}', '\u{0}']),
+ ('\u{22e}', ['\u{22f}', '\u{0}', '\u{0}']), ('\u{230}', ['\u{231}', '\u{0}', '\u{0}']),
+ ('\u{232}', ['\u{233}', '\u{0}', '\u{0}']), ('\u{23a}', ['\u{2c65}', '\u{0}', '\u{0}']),
+ ('\u{23b}', ['\u{23c}', '\u{0}', '\u{0}']), ('\u{23d}', ['\u{19a}', '\u{0}', '\u{0}']),
+ ('\u{23e}', ['\u{2c66}', '\u{0}', '\u{0}']), ('\u{241}', ['\u{242}', '\u{0}', '\u{0}']),
+ ('\u{243}', ['\u{180}', '\u{0}', '\u{0}']), ('\u{244}', ['\u{289}', '\u{0}', '\u{0}']),
+ ('\u{245}', ['\u{28c}', '\u{0}', '\u{0}']), ('\u{246}', ['\u{247}', '\u{0}', '\u{0}']),
+ ('\u{248}', ['\u{249}', '\u{0}', '\u{0}']), ('\u{24a}', ['\u{24b}', '\u{0}', '\u{0}']),
+ ('\u{24c}', ['\u{24d}', '\u{0}', '\u{0}']), ('\u{24e}', ['\u{24f}', '\u{0}', '\u{0}']),
+ ('\u{370}', ['\u{371}', '\u{0}', '\u{0}']), ('\u{372}', ['\u{373}', '\u{0}', '\u{0}']),
+ ('\u{376}', ['\u{377}', '\u{0}', '\u{0}']), ('\u{37f}', ['\u{3f3}', '\u{0}', '\u{0}']),
+ ('\u{386}', ['\u{3ac}', '\u{0}', '\u{0}']), ('\u{388}', ['\u{3ad}', '\u{0}', '\u{0}']),
+ ('\u{389}', ['\u{3ae}', '\u{0}', '\u{0}']), ('\u{38a}', ['\u{3af}', '\u{0}', '\u{0}']),
+ ('\u{38c}', ['\u{3cc}', '\u{0}', '\u{0}']), ('\u{38e}', ['\u{3cd}', '\u{0}', '\u{0}']),
+ ('\u{38f}', ['\u{3ce}', '\u{0}', '\u{0}']), ('\u{391}', ['\u{3b1}', '\u{0}', '\u{0}']),
+ ('\u{392}', ['\u{3b2}', '\u{0}', '\u{0}']), ('\u{393}', ['\u{3b3}', '\u{0}', '\u{0}']),
+ ('\u{394}', ['\u{3b4}', '\u{0}', '\u{0}']), ('\u{395}', ['\u{3b5}', '\u{0}', '\u{0}']),
+ ('\u{396}', ['\u{3b6}', '\u{0}', '\u{0}']), ('\u{397}', ['\u{3b7}', '\u{0}', '\u{0}']),
+ ('\u{398}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{399}', ['\u{3b9}', '\u{0}', '\u{0}']),
+ ('\u{39a}', ['\u{3ba}', '\u{0}', '\u{0}']), ('\u{39b}', ['\u{3bb}', '\u{0}', '\u{0}']),
+ ('\u{39c}', ['\u{3bc}', '\u{0}', '\u{0}']), ('\u{39d}', ['\u{3bd}', '\u{0}', '\u{0}']),
+ ('\u{39e}', ['\u{3be}', '\u{0}', '\u{0}']), ('\u{39f}', ['\u{3bf}', '\u{0}', '\u{0}']),
+ ('\u{3a0}', ['\u{3c0}', '\u{0}', '\u{0}']), ('\u{3a1}', ['\u{3c1}', '\u{0}', '\u{0}']),
+ ('\u{3a3}', ['\u{3c3}', '\u{0}', '\u{0}']), ('\u{3a4}', ['\u{3c4}', '\u{0}', '\u{0}']),
+ ('\u{3a5}', ['\u{3c5}', '\u{0}', '\u{0}']), ('\u{3a6}', ['\u{3c6}', '\u{0}', '\u{0}']),
+ ('\u{3a7}', ['\u{3c7}', '\u{0}', '\u{0}']), ('\u{3a8}', ['\u{3c8}', '\u{0}', '\u{0}']),
+ ('\u{3a9}', ['\u{3c9}', '\u{0}', '\u{0}']), ('\u{3aa}', ['\u{3ca}', '\u{0}', '\u{0}']),
+ ('\u{3ab}', ['\u{3cb}', '\u{0}', '\u{0}']), ('\u{3cf}', ['\u{3d7}', '\u{0}', '\u{0}']),
+ ('\u{3d8}', ['\u{3d9}', '\u{0}', '\u{0}']), ('\u{3da}', ['\u{3db}', '\u{0}', '\u{0}']),
+ ('\u{3dc}', ['\u{3dd}', '\u{0}', '\u{0}']), ('\u{3de}', ['\u{3df}', '\u{0}', '\u{0}']),
+ ('\u{3e0}', ['\u{3e1}', '\u{0}', '\u{0}']), ('\u{3e2}', ['\u{3e3}', '\u{0}', '\u{0}']),
+ ('\u{3e4}', ['\u{3e5}', '\u{0}', '\u{0}']), ('\u{3e6}', ['\u{3e7}', '\u{0}', '\u{0}']),
+ ('\u{3e8}', ['\u{3e9}', '\u{0}', '\u{0}']), ('\u{3ea}', ['\u{3eb}', '\u{0}', '\u{0}']),
+ ('\u{3ec}', ['\u{3ed}', '\u{0}', '\u{0}']), ('\u{3ee}', ['\u{3ef}', '\u{0}', '\u{0}']),
+ ('\u{3f4}', ['\u{3b8}', '\u{0}', '\u{0}']), ('\u{3f7}', ['\u{3f8}', '\u{0}', '\u{0}']),
+ ('\u{3f9}', ['\u{3f2}', '\u{0}', '\u{0}']), ('\u{3fa}', ['\u{3fb}', '\u{0}', '\u{0}']),
+ ('\u{3fd}', ['\u{37b}', '\u{0}', '\u{0}']), ('\u{3fe}', ['\u{37c}', '\u{0}', '\u{0}']),
+ ('\u{3ff}', ['\u{37d}', '\u{0}', '\u{0}']), ('\u{400}', ['\u{450}', '\u{0}', '\u{0}']),
+ ('\u{401}', ['\u{451}', '\u{0}', '\u{0}']), ('\u{402}', ['\u{452}', '\u{0}', '\u{0}']),
+ ('\u{403}', ['\u{453}', '\u{0}', '\u{0}']), ('\u{404}', ['\u{454}', '\u{0}', '\u{0}']),
+ ('\u{405}', ['\u{455}', '\u{0}', '\u{0}']), ('\u{406}', ['\u{456}', '\u{0}', '\u{0}']),
+ ('\u{407}', ['\u{457}', '\u{0}', '\u{0}']), ('\u{408}', ['\u{458}', '\u{0}', '\u{0}']),
+ ('\u{409}', ['\u{459}', '\u{0}', '\u{0}']), ('\u{40a}', ['\u{45a}', '\u{0}', '\u{0}']),
+ ('\u{40b}', ['\u{45b}', '\u{0}', '\u{0}']), ('\u{40c}', ['\u{45c}', '\u{0}', '\u{0}']),
+ ('\u{40d}', ['\u{45d}', '\u{0}', '\u{0}']), ('\u{40e}', ['\u{45e}', '\u{0}', '\u{0}']),
+ ('\u{40f}', ['\u{45f}', '\u{0}', '\u{0}']), ('\u{410}', ['\u{430}', '\u{0}', '\u{0}']),
+ ('\u{411}', ['\u{431}', '\u{0}', '\u{0}']), ('\u{412}', ['\u{432}', '\u{0}', '\u{0}']),
+ ('\u{413}', ['\u{433}', '\u{0}', '\u{0}']), ('\u{414}', ['\u{434}', '\u{0}', '\u{0}']),
+ ('\u{415}', ['\u{435}', '\u{0}', '\u{0}']), ('\u{416}', ['\u{436}', '\u{0}', '\u{0}']),
+ ('\u{417}', ['\u{437}', '\u{0}', '\u{0}']), ('\u{418}', ['\u{438}', '\u{0}', '\u{0}']),
+ ('\u{419}', ['\u{439}', '\u{0}', '\u{0}']), ('\u{41a}', ['\u{43a}', '\u{0}', '\u{0}']),
+ ('\u{41b}', ['\u{43b}', '\u{0}', '\u{0}']), ('\u{41c}', ['\u{43c}', '\u{0}', '\u{0}']),
+ ('\u{41d}', ['\u{43d}', '\u{0}', '\u{0}']), ('\u{41e}', ['\u{43e}', '\u{0}', '\u{0}']),
+ ('\u{41f}', ['\u{43f}', '\u{0}', '\u{0}']), ('\u{420}', ['\u{440}', '\u{0}', '\u{0}']),
+ ('\u{421}', ['\u{441}', '\u{0}', '\u{0}']), ('\u{422}', ['\u{442}', '\u{0}', '\u{0}']),
+ ('\u{423}', ['\u{443}', '\u{0}', '\u{0}']), ('\u{424}', ['\u{444}', '\u{0}', '\u{0}']),
+ ('\u{425}', ['\u{445}', '\u{0}', '\u{0}']), ('\u{426}', ['\u{446}', '\u{0}', '\u{0}']),
+ ('\u{427}', ['\u{447}', '\u{0}', '\u{0}']), ('\u{428}', ['\u{448}', '\u{0}', '\u{0}']),
+ ('\u{429}', ['\u{449}', '\u{0}', '\u{0}']), ('\u{42a}', ['\u{44a}', '\u{0}', '\u{0}']),
+ ('\u{42b}', ['\u{44b}', '\u{0}', '\u{0}']), ('\u{42c}', ['\u{44c}', '\u{0}', '\u{0}']),
+ ('\u{42d}', ['\u{44d}', '\u{0}', '\u{0}']), ('\u{42e}', ['\u{44e}', '\u{0}', '\u{0}']),
+ ('\u{42f}', ['\u{44f}', '\u{0}', '\u{0}']), ('\u{460}', ['\u{461}', '\u{0}', '\u{0}']),
+ ('\u{462}', ['\u{463}', '\u{0}', '\u{0}']), ('\u{464}', ['\u{465}', '\u{0}', '\u{0}']),
+ ('\u{466}', ['\u{467}', '\u{0}', '\u{0}']), ('\u{468}', ['\u{469}', '\u{0}', '\u{0}']),
+ ('\u{46a}', ['\u{46b}', '\u{0}', '\u{0}']), ('\u{46c}', ['\u{46d}', '\u{0}', '\u{0}']),
+ ('\u{46e}', ['\u{46f}', '\u{0}', '\u{0}']), ('\u{470}', ['\u{471}', '\u{0}', '\u{0}']),
+ ('\u{472}', ['\u{473}', '\u{0}', '\u{0}']), ('\u{474}', ['\u{475}', '\u{0}', '\u{0}']),
+ ('\u{476}', ['\u{477}', '\u{0}', '\u{0}']), ('\u{478}', ['\u{479}', '\u{0}', '\u{0}']),
+ ('\u{47a}', ['\u{47b}', '\u{0}', '\u{0}']), ('\u{47c}', ['\u{47d}', '\u{0}', '\u{0}']),
+ ('\u{47e}', ['\u{47f}', '\u{0}', '\u{0}']), ('\u{480}', ['\u{481}', '\u{0}', '\u{0}']),
+ ('\u{48a}', ['\u{48b}', '\u{0}', '\u{0}']), ('\u{48c}', ['\u{48d}', '\u{0}', '\u{0}']),
+ ('\u{48e}', ['\u{48f}', '\u{0}', '\u{0}']), ('\u{490}', ['\u{491}', '\u{0}', '\u{0}']),
+ ('\u{492}', ['\u{493}', '\u{0}', '\u{0}']), ('\u{494}', ['\u{495}', '\u{0}', '\u{0}']),
+ ('\u{496}', ['\u{497}', '\u{0}', '\u{0}']), ('\u{498}', ['\u{499}', '\u{0}', '\u{0}']),
+ ('\u{49a}', ['\u{49b}', '\u{0}', '\u{0}']), ('\u{49c}', ['\u{49d}', '\u{0}', '\u{0}']),
+ ('\u{49e}', ['\u{49f}', '\u{0}', '\u{0}']), ('\u{4a0}', ['\u{4a1}', '\u{0}', '\u{0}']),
+ ('\u{4a2}', ['\u{4a3}', '\u{0}', '\u{0}']), ('\u{4a4}', ['\u{4a5}', '\u{0}', '\u{0}']),
+ ('\u{4a6}', ['\u{4a7}', '\u{0}', '\u{0}']), ('\u{4a8}', ['\u{4a9}', '\u{0}', '\u{0}']),
+ ('\u{4aa}', ['\u{4ab}', '\u{0}', '\u{0}']), ('\u{4ac}', ['\u{4ad}', '\u{0}', '\u{0}']),
+ ('\u{4ae}', ['\u{4af}', '\u{0}', '\u{0}']), ('\u{4b0}', ['\u{4b1}', '\u{0}', '\u{0}']),
+ ('\u{4b2}', ['\u{4b3}', '\u{0}', '\u{0}']), ('\u{4b4}', ['\u{4b5}', '\u{0}', '\u{0}']),
+ ('\u{4b6}', ['\u{4b7}', '\u{0}', '\u{0}']), ('\u{4b8}', ['\u{4b9}', '\u{0}', '\u{0}']),
+ ('\u{4ba}', ['\u{4bb}', '\u{0}', '\u{0}']), ('\u{4bc}', ['\u{4bd}', '\u{0}', '\u{0}']),
+ ('\u{4be}', ['\u{4bf}', '\u{0}', '\u{0}']), ('\u{4c0}', ['\u{4cf}', '\u{0}', '\u{0}']),
+ ('\u{4c1}', ['\u{4c2}', '\u{0}', '\u{0}']), ('\u{4c3}', ['\u{4c4}', '\u{0}', '\u{0}']),
+ ('\u{4c5}', ['\u{4c6}', '\u{0}', '\u{0}']), ('\u{4c7}', ['\u{4c8}', '\u{0}', '\u{0}']),
+ ('\u{4c9}', ['\u{4ca}', '\u{0}', '\u{0}']), ('\u{4cb}', ['\u{4cc}', '\u{0}', '\u{0}']),
+ ('\u{4cd}', ['\u{4ce}', '\u{0}', '\u{0}']), ('\u{4d0}', ['\u{4d1}', '\u{0}', '\u{0}']),
+ ('\u{4d2}', ['\u{4d3}', '\u{0}', '\u{0}']), ('\u{4d4}', ['\u{4d5}', '\u{0}', '\u{0}']),
+ ('\u{4d6}', ['\u{4d7}', '\u{0}', '\u{0}']), ('\u{4d8}', ['\u{4d9}', '\u{0}', '\u{0}']),
+ ('\u{4da}', ['\u{4db}', '\u{0}', '\u{0}']), ('\u{4dc}', ['\u{4dd}', '\u{0}', '\u{0}']),
+ ('\u{4de}', ['\u{4df}', '\u{0}', '\u{0}']), ('\u{4e0}', ['\u{4e1}', '\u{0}', '\u{0}']),
+ ('\u{4e2}', ['\u{4e3}', '\u{0}', '\u{0}']), ('\u{4e4}', ['\u{4e5}', '\u{0}', '\u{0}']),
+ ('\u{4e6}', ['\u{4e7}', '\u{0}', '\u{0}']), ('\u{4e8}', ['\u{4e9}', '\u{0}', '\u{0}']),
+ ('\u{4ea}', ['\u{4eb}', '\u{0}', '\u{0}']), ('\u{4ec}', ['\u{4ed}', '\u{0}', '\u{0}']),
+ ('\u{4ee}', ['\u{4ef}', '\u{0}', '\u{0}']), ('\u{4f0}', ['\u{4f1}', '\u{0}', '\u{0}']),
+ ('\u{4f2}', ['\u{4f3}', '\u{0}', '\u{0}']), ('\u{4f4}', ['\u{4f5}', '\u{0}', '\u{0}']),
+ ('\u{4f6}', ['\u{4f7}', '\u{0}', '\u{0}']), ('\u{4f8}', ['\u{4f9}', '\u{0}', '\u{0}']),
+ ('\u{4fa}', ['\u{4fb}', '\u{0}', '\u{0}']), ('\u{4fc}', ['\u{4fd}', '\u{0}', '\u{0}']),
+ ('\u{4fe}', ['\u{4ff}', '\u{0}', '\u{0}']), ('\u{500}', ['\u{501}', '\u{0}', '\u{0}']),
+ ('\u{502}', ['\u{503}', '\u{0}', '\u{0}']), ('\u{504}', ['\u{505}', '\u{0}', '\u{0}']),
+ ('\u{506}', ['\u{507}', '\u{0}', '\u{0}']), ('\u{508}', ['\u{509}', '\u{0}', '\u{0}']),
+ ('\u{50a}', ['\u{50b}', '\u{0}', '\u{0}']), ('\u{50c}', ['\u{50d}', '\u{0}', '\u{0}']),
+ ('\u{50e}', ['\u{50f}', '\u{0}', '\u{0}']), ('\u{510}', ['\u{511}', '\u{0}', '\u{0}']),
+ ('\u{512}', ['\u{513}', '\u{0}', '\u{0}']), ('\u{514}', ['\u{515}', '\u{0}', '\u{0}']),
+ ('\u{516}', ['\u{517}', '\u{0}', '\u{0}']), ('\u{518}', ['\u{519}', '\u{0}', '\u{0}']),
+ ('\u{51a}', ['\u{51b}', '\u{0}', '\u{0}']), ('\u{51c}', ['\u{51d}', '\u{0}', '\u{0}']),
+ ('\u{51e}', ['\u{51f}', '\u{0}', '\u{0}']), ('\u{520}', ['\u{521}', '\u{0}', '\u{0}']),
+ ('\u{522}', ['\u{523}', '\u{0}', '\u{0}']), ('\u{524}', ['\u{525}', '\u{0}', '\u{0}']),
+ ('\u{526}', ['\u{527}', '\u{0}', '\u{0}']), ('\u{528}', ['\u{529}', '\u{0}', '\u{0}']),
+ ('\u{52a}', ['\u{52b}', '\u{0}', '\u{0}']), ('\u{52c}', ['\u{52d}', '\u{0}', '\u{0}']),
+ ('\u{52e}', ['\u{52f}', '\u{0}', '\u{0}']), ('\u{531}', ['\u{561}', '\u{0}', '\u{0}']),
+ ('\u{532}', ['\u{562}', '\u{0}', '\u{0}']), ('\u{533}', ['\u{563}', '\u{0}', '\u{0}']),
+ ('\u{534}', ['\u{564}', '\u{0}', '\u{0}']), ('\u{535}', ['\u{565}', '\u{0}', '\u{0}']),
+ ('\u{536}', ['\u{566}', '\u{0}', '\u{0}']), ('\u{537}', ['\u{567}', '\u{0}', '\u{0}']),
+ ('\u{538}', ['\u{568}', '\u{0}', '\u{0}']), ('\u{539}', ['\u{569}', '\u{0}', '\u{0}']),
+ ('\u{53a}', ['\u{56a}', '\u{0}', '\u{0}']), ('\u{53b}', ['\u{56b}', '\u{0}', '\u{0}']),
+ ('\u{53c}', ['\u{56c}', '\u{0}', '\u{0}']), ('\u{53d}', ['\u{56d}', '\u{0}', '\u{0}']),
+ ('\u{53e}', ['\u{56e}', '\u{0}', '\u{0}']), ('\u{53f}', ['\u{56f}', '\u{0}', '\u{0}']),
+ ('\u{540}', ['\u{570}', '\u{0}', '\u{0}']), ('\u{541}', ['\u{571}', '\u{0}', '\u{0}']),
+ ('\u{542}', ['\u{572}', '\u{0}', '\u{0}']), ('\u{543}', ['\u{573}', '\u{0}', '\u{0}']),
+ ('\u{544}', ['\u{574}', '\u{0}', '\u{0}']), ('\u{545}', ['\u{575}', '\u{0}', '\u{0}']),
+ ('\u{546}', ['\u{576}', '\u{0}', '\u{0}']), ('\u{547}', ['\u{577}', '\u{0}', '\u{0}']),
+ ('\u{548}', ['\u{578}', '\u{0}', '\u{0}']), ('\u{549}', ['\u{579}', '\u{0}', '\u{0}']),
+ ('\u{54a}', ['\u{57a}', '\u{0}', '\u{0}']), ('\u{54b}', ['\u{57b}', '\u{0}', '\u{0}']),
+ ('\u{54c}', ['\u{57c}', '\u{0}', '\u{0}']), ('\u{54d}', ['\u{57d}', '\u{0}', '\u{0}']),
+ ('\u{54e}', ['\u{57e}', '\u{0}', '\u{0}']), ('\u{54f}', ['\u{57f}', '\u{0}', '\u{0}']),
+ ('\u{550}', ['\u{580}', '\u{0}', '\u{0}']), ('\u{551}', ['\u{581}', '\u{0}', '\u{0}']),
+ ('\u{552}', ['\u{582}', '\u{0}', '\u{0}']), ('\u{553}', ['\u{583}', '\u{0}', '\u{0}']),
+ ('\u{554}', ['\u{584}', '\u{0}', '\u{0}']), ('\u{555}', ['\u{585}', '\u{0}', '\u{0}']),
+ ('\u{556}', ['\u{586}', '\u{0}', '\u{0}']), ('\u{10a0}', ['\u{2d00}', '\u{0}', '\u{0}']),
+ ('\u{10a1}', ['\u{2d01}', '\u{0}', '\u{0}']), ('\u{10a2}', ['\u{2d02}', '\u{0}', '\u{0}']),
+ ('\u{10a3}', ['\u{2d03}', '\u{0}', '\u{0}']), ('\u{10a4}', ['\u{2d04}', '\u{0}', '\u{0}']),
+ ('\u{10a5}', ['\u{2d05}', '\u{0}', '\u{0}']), ('\u{10a6}', ['\u{2d06}', '\u{0}', '\u{0}']),
+ ('\u{10a7}', ['\u{2d07}', '\u{0}', '\u{0}']), ('\u{10a8}', ['\u{2d08}', '\u{0}', '\u{0}']),
+ ('\u{10a9}', ['\u{2d09}', '\u{0}', '\u{0}']), ('\u{10aa}', ['\u{2d0a}', '\u{0}', '\u{0}']),
+ ('\u{10ab}', ['\u{2d0b}', '\u{0}', '\u{0}']), ('\u{10ac}', ['\u{2d0c}', '\u{0}', '\u{0}']),
+ ('\u{10ad}', ['\u{2d0d}', '\u{0}', '\u{0}']), ('\u{10ae}', ['\u{2d0e}', '\u{0}', '\u{0}']),
+ ('\u{10af}', ['\u{2d0f}', '\u{0}', '\u{0}']), ('\u{10b0}', ['\u{2d10}', '\u{0}', '\u{0}']),
+ ('\u{10b1}', ['\u{2d11}', '\u{0}', '\u{0}']), ('\u{10b2}', ['\u{2d12}', '\u{0}', '\u{0}']),
+ ('\u{10b3}', ['\u{2d13}', '\u{0}', '\u{0}']), ('\u{10b4}', ['\u{2d14}', '\u{0}', '\u{0}']),
+ ('\u{10b5}', ['\u{2d15}', '\u{0}', '\u{0}']), ('\u{10b6}', ['\u{2d16}', '\u{0}', '\u{0}']),
+ ('\u{10b7}', ['\u{2d17}', '\u{0}', '\u{0}']), ('\u{10b8}', ['\u{2d18}', '\u{0}', '\u{0}']),
+ ('\u{10b9}', ['\u{2d19}', '\u{0}', '\u{0}']), ('\u{10ba}', ['\u{2d1a}', '\u{0}', '\u{0}']),
+ ('\u{10bb}', ['\u{2d1b}', '\u{0}', '\u{0}']), ('\u{10bc}', ['\u{2d1c}', '\u{0}', '\u{0}']),
+ ('\u{10bd}', ['\u{2d1d}', '\u{0}', '\u{0}']), ('\u{10be}', ['\u{2d1e}', '\u{0}', '\u{0}']),
+ ('\u{10bf}', ['\u{2d1f}', '\u{0}', '\u{0}']), ('\u{10c0}', ['\u{2d20}', '\u{0}', '\u{0}']),
+ ('\u{10c1}', ['\u{2d21}', '\u{0}', '\u{0}']), ('\u{10c2}', ['\u{2d22}', '\u{0}', '\u{0}']),
+ ('\u{10c3}', ['\u{2d23}', '\u{0}', '\u{0}']), ('\u{10c4}', ['\u{2d24}', '\u{0}', '\u{0}']),
+ ('\u{10c5}', ['\u{2d25}', '\u{0}', '\u{0}']), ('\u{10c7}', ['\u{2d27}', '\u{0}', '\u{0}']),
+ ('\u{10cd}', ['\u{2d2d}', '\u{0}', '\u{0}']), ('\u{13a0}', ['\u{ab70}', '\u{0}', '\u{0}']),
+ ('\u{13a1}', ['\u{ab71}', '\u{0}', '\u{0}']), ('\u{13a2}', ['\u{ab72}', '\u{0}', '\u{0}']),
+ ('\u{13a3}', ['\u{ab73}', '\u{0}', '\u{0}']), ('\u{13a4}', ['\u{ab74}', '\u{0}', '\u{0}']),
+ ('\u{13a5}', ['\u{ab75}', '\u{0}', '\u{0}']), ('\u{13a6}', ['\u{ab76}', '\u{0}', '\u{0}']),
+ ('\u{13a7}', ['\u{ab77}', '\u{0}', '\u{0}']), ('\u{13a8}', ['\u{ab78}', '\u{0}', '\u{0}']),
+ ('\u{13a9}', ['\u{ab79}', '\u{0}', '\u{0}']), ('\u{13aa}', ['\u{ab7a}', '\u{0}', '\u{0}']),
+ ('\u{13ab}', ['\u{ab7b}', '\u{0}', '\u{0}']), ('\u{13ac}', ['\u{ab7c}', '\u{0}', '\u{0}']),
+ ('\u{13ad}', ['\u{ab7d}', '\u{0}', '\u{0}']), ('\u{13ae}', ['\u{ab7e}', '\u{0}', '\u{0}']),
+ ('\u{13af}', ['\u{ab7f}', '\u{0}', '\u{0}']), ('\u{13b0}', ['\u{ab80}', '\u{0}', '\u{0}']),
+ ('\u{13b1}', ['\u{ab81}', '\u{0}', '\u{0}']), ('\u{13b2}', ['\u{ab82}', '\u{0}', '\u{0}']),
+ ('\u{13b3}', ['\u{ab83}', '\u{0}', '\u{0}']), ('\u{13b4}', ['\u{ab84}', '\u{0}', '\u{0}']),
+ ('\u{13b5}', ['\u{ab85}', '\u{0}', '\u{0}']), ('\u{13b6}', ['\u{ab86}', '\u{0}', '\u{0}']),
+ ('\u{13b7}', ['\u{ab87}', '\u{0}', '\u{0}']), ('\u{13b8}', ['\u{ab88}', '\u{0}', '\u{0}']),
+ ('\u{13b9}', ['\u{ab89}', '\u{0}', '\u{0}']), ('\u{13ba}', ['\u{ab8a}', '\u{0}', '\u{0}']),
+ ('\u{13bb}', ['\u{ab8b}', '\u{0}', '\u{0}']), ('\u{13bc}', ['\u{ab8c}', '\u{0}', '\u{0}']),
+ ('\u{13bd}', ['\u{ab8d}', '\u{0}', '\u{0}']), ('\u{13be}', ['\u{ab8e}', '\u{0}', '\u{0}']),
+ ('\u{13bf}', ['\u{ab8f}', '\u{0}', '\u{0}']), ('\u{13c0}', ['\u{ab90}', '\u{0}', '\u{0}']),
+ ('\u{13c1}', ['\u{ab91}', '\u{0}', '\u{0}']), ('\u{13c2}', ['\u{ab92}', '\u{0}', '\u{0}']),
+ ('\u{13c3}', ['\u{ab93}', '\u{0}', '\u{0}']), ('\u{13c4}', ['\u{ab94}', '\u{0}', '\u{0}']),
+ ('\u{13c5}', ['\u{ab95}', '\u{0}', '\u{0}']), ('\u{13c6}', ['\u{ab96}', '\u{0}', '\u{0}']),
+ ('\u{13c7}', ['\u{ab97}', '\u{0}', '\u{0}']), ('\u{13c8}', ['\u{ab98}', '\u{0}', '\u{0}']),
+ ('\u{13c9}', ['\u{ab99}', '\u{0}', '\u{0}']), ('\u{13ca}', ['\u{ab9a}', '\u{0}', '\u{0}']),
+ ('\u{13cb}', ['\u{ab9b}', '\u{0}', '\u{0}']), ('\u{13cc}', ['\u{ab9c}', '\u{0}', '\u{0}']),
+ ('\u{13cd}', ['\u{ab9d}', '\u{0}', '\u{0}']), ('\u{13ce}', ['\u{ab9e}', '\u{0}', '\u{0}']),
+ ('\u{13cf}', ['\u{ab9f}', '\u{0}', '\u{0}']), ('\u{13d0}', ['\u{aba0}', '\u{0}', '\u{0}']),
+ ('\u{13d1}', ['\u{aba1}', '\u{0}', '\u{0}']), ('\u{13d2}', ['\u{aba2}', '\u{0}', '\u{0}']),
+ ('\u{13d3}', ['\u{aba3}', '\u{0}', '\u{0}']), ('\u{13d4}', ['\u{aba4}', '\u{0}', '\u{0}']),
+ ('\u{13d5}', ['\u{aba5}', '\u{0}', '\u{0}']), ('\u{13d6}', ['\u{aba6}', '\u{0}', '\u{0}']),
+ ('\u{13d7}', ['\u{aba7}', '\u{0}', '\u{0}']), ('\u{13d8}', ['\u{aba8}', '\u{0}', '\u{0}']),
+ ('\u{13d9}', ['\u{aba9}', '\u{0}', '\u{0}']), ('\u{13da}', ['\u{abaa}', '\u{0}', '\u{0}']),
+ ('\u{13db}', ['\u{abab}', '\u{0}', '\u{0}']), ('\u{13dc}', ['\u{abac}', '\u{0}', '\u{0}']),
+ ('\u{13dd}', ['\u{abad}', '\u{0}', '\u{0}']), ('\u{13de}', ['\u{abae}', '\u{0}', '\u{0}']),
+ ('\u{13df}', ['\u{abaf}', '\u{0}', '\u{0}']), ('\u{13e0}', ['\u{abb0}', '\u{0}', '\u{0}']),
+ ('\u{13e1}', ['\u{abb1}', '\u{0}', '\u{0}']), ('\u{13e2}', ['\u{abb2}', '\u{0}', '\u{0}']),
+ ('\u{13e3}', ['\u{abb3}', '\u{0}', '\u{0}']), ('\u{13e4}', ['\u{abb4}', '\u{0}', '\u{0}']),
+ ('\u{13e5}', ['\u{abb5}', '\u{0}', '\u{0}']), ('\u{13e6}', ['\u{abb6}', '\u{0}', '\u{0}']),
+ ('\u{13e7}', ['\u{abb7}', '\u{0}', '\u{0}']), ('\u{13e8}', ['\u{abb8}', '\u{0}', '\u{0}']),
+ ('\u{13e9}', ['\u{abb9}', '\u{0}', '\u{0}']), ('\u{13ea}', ['\u{abba}', '\u{0}', '\u{0}']),
+ ('\u{13eb}', ['\u{abbb}', '\u{0}', '\u{0}']), ('\u{13ec}', ['\u{abbc}', '\u{0}', '\u{0}']),
+ ('\u{13ed}', ['\u{abbd}', '\u{0}', '\u{0}']), ('\u{13ee}', ['\u{abbe}', '\u{0}', '\u{0}']),
+ ('\u{13ef}', ['\u{abbf}', '\u{0}', '\u{0}']), ('\u{13f0}', ['\u{13f8}', '\u{0}', '\u{0}']),
+ ('\u{13f1}', ['\u{13f9}', '\u{0}', '\u{0}']), ('\u{13f2}', ['\u{13fa}', '\u{0}', '\u{0}']),
+ ('\u{13f3}', ['\u{13fb}', '\u{0}', '\u{0}']), ('\u{13f4}', ['\u{13fc}', '\u{0}', '\u{0}']),
+ ('\u{13f5}', ['\u{13fd}', '\u{0}', '\u{0}']), ('\u{1c90}', ['\u{10d0}', '\u{0}', '\u{0}']),
+ ('\u{1c91}', ['\u{10d1}', '\u{0}', '\u{0}']), ('\u{1c92}', ['\u{10d2}', '\u{0}', '\u{0}']),
+ ('\u{1c93}', ['\u{10d3}', '\u{0}', '\u{0}']), ('\u{1c94}', ['\u{10d4}', '\u{0}', '\u{0}']),
+ ('\u{1c95}', ['\u{10d5}', '\u{0}', '\u{0}']), ('\u{1c96}', ['\u{10d6}', '\u{0}', '\u{0}']),
+ ('\u{1c97}', ['\u{10d7}', '\u{0}', '\u{0}']), ('\u{1c98}', ['\u{10d8}', '\u{0}', '\u{0}']),
+ ('\u{1c99}', ['\u{10d9}', '\u{0}', '\u{0}']), ('\u{1c9a}', ['\u{10da}', '\u{0}', '\u{0}']),
+ ('\u{1c9b}', ['\u{10db}', '\u{0}', '\u{0}']), ('\u{1c9c}', ['\u{10dc}', '\u{0}', '\u{0}']),
+ ('\u{1c9d}', ['\u{10dd}', '\u{0}', '\u{0}']), ('\u{1c9e}', ['\u{10de}', '\u{0}', '\u{0}']),
+ ('\u{1c9f}', ['\u{10df}', '\u{0}', '\u{0}']), ('\u{1ca0}', ['\u{10e0}', '\u{0}', '\u{0}']),
+ ('\u{1ca1}', ['\u{10e1}', '\u{0}', '\u{0}']), ('\u{1ca2}', ['\u{10e2}', '\u{0}', '\u{0}']),
+ ('\u{1ca3}', ['\u{10e3}', '\u{0}', '\u{0}']), ('\u{1ca4}', ['\u{10e4}', '\u{0}', '\u{0}']),
+ ('\u{1ca5}', ['\u{10e5}', '\u{0}', '\u{0}']), ('\u{1ca6}', ['\u{10e6}', '\u{0}', '\u{0}']),
+ ('\u{1ca7}', ['\u{10e7}', '\u{0}', '\u{0}']), ('\u{1ca8}', ['\u{10e8}', '\u{0}', '\u{0}']),
+ ('\u{1ca9}', ['\u{10e9}', '\u{0}', '\u{0}']), ('\u{1caa}', ['\u{10ea}', '\u{0}', '\u{0}']),
+ ('\u{1cab}', ['\u{10eb}', '\u{0}', '\u{0}']), ('\u{1cac}', ['\u{10ec}', '\u{0}', '\u{0}']),
+ ('\u{1cad}', ['\u{10ed}', '\u{0}', '\u{0}']), ('\u{1cae}', ['\u{10ee}', '\u{0}', '\u{0}']),
+ ('\u{1caf}', ['\u{10ef}', '\u{0}', '\u{0}']), ('\u{1cb0}', ['\u{10f0}', '\u{0}', '\u{0}']),
+ ('\u{1cb1}', ['\u{10f1}', '\u{0}', '\u{0}']), ('\u{1cb2}', ['\u{10f2}', '\u{0}', '\u{0}']),
+ ('\u{1cb3}', ['\u{10f3}', '\u{0}', '\u{0}']), ('\u{1cb4}', ['\u{10f4}', '\u{0}', '\u{0}']),
+ ('\u{1cb5}', ['\u{10f5}', '\u{0}', '\u{0}']), ('\u{1cb6}', ['\u{10f6}', '\u{0}', '\u{0}']),
+ ('\u{1cb7}', ['\u{10f7}', '\u{0}', '\u{0}']), ('\u{1cb8}', ['\u{10f8}', '\u{0}', '\u{0}']),
+ ('\u{1cb9}', ['\u{10f9}', '\u{0}', '\u{0}']), ('\u{1cba}', ['\u{10fa}', '\u{0}', '\u{0}']),
+ ('\u{1cbd}', ['\u{10fd}', '\u{0}', '\u{0}']), ('\u{1cbe}', ['\u{10fe}', '\u{0}', '\u{0}']),
+ ('\u{1cbf}', ['\u{10ff}', '\u{0}', '\u{0}']), ('\u{1e00}', ['\u{1e01}', '\u{0}', '\u{0}']),
+ ('\u{1e02}', ['\u{1e03}', '\u{0}', '\u{0}']), ('\u{1e04}', ['\u{1e05}', '\u{0}', '\u{0}']),
+ ('\u{1e06}', ['\u{1e07}', '\u{0}', '\u{0}']), ('\u{1e08}', ['\u{1e09}', '\u{0}', '\u{0}']),
+ ('\u{1e0a}', ['\u{1e0b}', '\u{0}', '\u{0}']), ('\u{1e0c}', ['\u{1e0d}', '\u{0}', '\u{0}']),
+ ('\u{1e0e}', ['\u{1e0f}', '\u{0}', '\u{0}']), ('\u{1e10}', ['\u{1e11}', '\u{0}', '\u{0}']),
+ ('\u{1e12}', ['\u{1e13}', '\u{0}', '\u{0}']), ('\u{1e14}', ['\u{1e15}', '\u{0}', '\u{0}']),
+ ('\u{1e16}', ['\u{1e17}', '\u{0}', '\u{0}']), ('\u{1e18}', ['\u{1e19}', '\u{0}', '\u{0}']),
+ ('\u{1e1a}', ['\u{1e1b}', '\u{0}', '\u{0}']), ('\u{1e1c}', ['\u{1e1d}', '\u{0}', '\u{0}']),
+ ('\u{1e1e}', ['\u{1e1f}', '\u{0}', '\u{0}']), ('\u{1e20}', ['\u{1e21}', '\u{0}', '\u{0}']),
+ ('\u{1e22}', ['\u{1e23}', '\u{0}', '\u{0}']), ('\u{1e24}', ['\u{1e25}', '\u{0}', '\u{0}']),
+ ('\u{1e26}', ['\u{1e27}', '\u{0}', '\u{0}']), ('\u{1e28}', ['\u{1e29}', '\u{0}', '\u{0}']),
+ ('\u{1e2a}', ['\u{1e2b}', '\u{0}', '\u{0}']), ('\u{1e2c}', ['\u{1e2d}', '\u{0}', '\u{0}']),
+ ('\u{1e2e}', ['\u{1e2f}', '\u{0}', '\u{0}']), ('\u{1e30}', ['\u{1e31}', '\u{0}', '\u{0}']),
+ ('\u{1e32}', ['\u{1e33}', '\u{0}', '\u{0}']), ('\u{1e34}', ['\u{1e35}', '\u{0}', '\u{0}']),
+ ('\u{1e36}', ['\u{1e37}', '\u{0}', '\u{0}']), ('\u{1e38}', ['\u{1e39}', '\u{0}', '\u{0}']),
+ ('\u{1e3a}', ['\u{1e3b}', '\u{0}', '\u{0}']), ('\u{1e3c}', ['\u{1e3d}', '\u{0}', '\u{0}']),
+ ('\u{1e3e}', ['\u{1e3f}', '\u{0}', '\u{0}']), ('\u{1e40}', ['\u{1e41}', '\u{0}', '\u{0}']),
+ ('\u{1e42}', ['\u{1e43}', '\u{0}', '\u{0}']), ('\u{1e44}', ['\u{1e45}', '\u{0}', '\u{0}']),
+ ('\u{1e46}', ['\u{1e47}', '\u{0}', '\u{0}']), ('\u{1e48}', ['\u{1e49}', '\u{0}', '\u{0}']),
+ ('\u{1e4a}', ['\u{1e4b}', '\u{0}', '\u{0}']), ('\u{1e4c}', ['\u{1e4d}', '\u{0}', '\u{0}']),
+ ('\u{1e4e}', ['\u{1e4f}', '\u{0}', '\u{0}']), ('\u{1e50}', ['\u{1e51}', '\u{0}', '\u{0}']),
+ ('\u{1e52}', ['\u{1e53}', '\u{0}', '\u{0}']), ('\u{1e54}', ['\u{1e55}', '\u{0}', '\u{0}']),
+ ('\u{1e56}', ['\u{1e57}', '\u{0}', '\u{0}']), ('\u{1e58}', ['\u{1e59}', '\u{0}', '\u{0}']),
+ ('\u{1e5a}', ['\u{1e5b}', '\u{0}', '\u{0}']), ('\u{1e5c}', ['\u{1e5d}', '\u{0}', '\u{0}']),
+ ('\u{1e5e}', ['\u{1e5f}', '\u{0}', '\u{0}']), ('\u{1e60}', ['\u{1e61}', '\u{0}', '\u{0}']),
+ ('\u{1e62}', ['\u{1e63}', '\u{0}', '\u{0}']), ('\u{1e64}', ['\u{1e65}', '\u{0}', '\u{0}']),
+ ('\u{1e66}', ['\u{1e67}', '\u{0}', '\u{0}']), ('\u{1e68}', ['\u{1e69}', '\u{0}', '\u{0}']),
+ ('\u{1e6a}', ['\u{1e6b}', '\u{0}', '\u{0}']), ('\u{1e6c}', ['\u{1e6d}', '\u{0}', '\u{0}']),
+ ('\u{1e6e}', ['\u{1e6f}', '\u{0}', '\u{0}']), ('\u{1e70}', ['\u{1e71}', '\u{0}', '\u{0}']),
+ ('\u{1e72}', ['\u{1e73}', '\u{0}', '\u{0}']), ('\u{1e74}', ['\u{1e75}', '\u{0}', '\u{0}']),
+ ('\u{1e76}', ['\u{1e77}', '\u{0}', '\u{0}']), ('\u{1e78}', ['\u{1e79}', '\u{0}', '\u{0}']),
+ ('\u{1e7a}', ['\u{1e7b}', '\u{0}', '\u{0}']), ('\u{1e7c}', ['\u{1e7d}', '\u{0}', '\u{0}']),
+ ('\u{1e7e}', ['\u{1e7f}', '\u{0}', '\u{0}']), ('\u{1e80}', ['\u{1e81}', '\u{0}', '\u{0}']),
+ ('\u{1e82}', ['\u{1e83}', '\u{0}', '\u{0}']), ('\u{1e84}', ['\u{1e85}', '\u{0}', '\u{0}']),
+ ('\u{1e86}', ['\u{1e87}', '\u{0}', '\u{0}']), ('\u{1e88}', ['\u{1e89}', '\u{0}', '\u{0}']),
+ ('\u{1e8a}', ['\u{1e8b}', '\u{0}', '\u{0}']), ('\u{1e8c}', ['\u{1e8d}', '\u{0}', '\u{0}']),
+ ('\u{1e8e}', ['\u{1e8f}', '\u{0}', '\u{0}']), ('\u{1e90}', ['\u{1e91}', '\u{0}', '\u{0}']),
+ ('\u{1e92}', ['\u{1e93}', '\u{0}', '\u{0}']), ('\u{1e94}', ['\u{1e95}', '\u{0}', '\u{0}']),
+ ('\u{1e9e}', ['\u{df}', '\u{0}', '\u{0}']), ('\u{1ea0}', ['\u{1ea1}', '\u{0}', '\u{0}']),
+ ('\u{1ea2}', ['\u{1ea3}', '\u{0}', '\u{0}']), ('\u{1ea4}', ['\u{1ea5}', '\u{0}', '\u{0}']),
+ ('\u{1ea6}', ['\u{1ea7}', '\u{0}', '\u{0}']), ('\u{1ea8}', ['\u{1ea9}', '\u{0}', '\u{0}']),
+ ('\u{1eaa}', ['\u{1eab}', '\u{0}', '\u{0}']), ('\u{1eac}', ['\u{1ead}', '\u{0}', '\u{0}']),
+ ('\u{1eae}', ['\u{1eaf}', '\u{0}', '\u{0}']), ('\u{1eb0}', ['\u{1eb1}', '\u{0}', '\u{0}']),
+ ('\u{1eb2}', ['\u{1eb3}', '\u{0}', '\u{0}']), ('\u{1eb4}', ['\u{1eb5}', '\u{0}', '\u{0}']),
+ ('\u{1eb6}', ['\u{1eb7}', '\u{0}', '\u{0}']), ('\u{1eb8}', ['\u{1eb9}', '\u{0}', '\u{0}']),
+ ('\u{1eba}', ['\u{1ebb}', '\u{0}', '\u{0}']), ('\u{1ebc}', ['\u{1ebd}', '\u{0}', '\u{0}']),
+ ('\u{1ebe}', ['\u{1ebf}', '\u{0}', '\u{0}']), ('\u{1ec0}', ['\u{1ec1}', '\u{0}', '\u{0}']),
+ ('\u{1ec2}', ['\u{1ec3}', '\u{0}', '\u{0}']), ('\u{1ec4}', ['\u{1ec5}', '\u{0}', '\u{0}']),
+ ('\u{1ec6}', ['\u{1ec7}', '\u{0}', '\u{0}']), ('\u{1ec8}', ['\u{1ec9}', '\u{0}', '\u{0}']),
+ ('\u{1eca}', ['\u{1ecb}', '\u{0}', '\u{0}']), ('\u{1ecc}', ['\u{1ecd}', '\u{0}', '\u{0}']),
+ ('\u{1ece}', ['\u{1ecf}', '\u{0}', '\u{0}']), ('\u{1ed0}', ['\u{1ed1}', '\u{0}', '\u{0}']),
+ ('\u{1ed2}', ['\u{1ed3}', '\u{0}', '\u{0}']), ('\u{1ed4}', ['\u{1ed5}', '\u{0}', '\u{0}']),
+ ('\u{1ed6}', ['\u{1ed7}', '\u{0}', '\u{0}']), ('\u{1ed8}', ['\u{1ed9}', '\u{0}', '\u{0}']),
+ ('\u{1eda}', ['\u{1edb}', '\u{0}', '\u{0}']), ('\u{1edc}', ['\u{1edd}', '\u{0}', '\u{0}']),
+ ('\u{1ede}', ['\u{1edf}', '\u{0}', '\u{0}']), ('\u{1ee0}', ['\u{1ee1}', '\u{0}', '\u{0}']),
+ ('\u{1ee2}', ['\u{1ee3}', '\u{0}', '\u{0}']), ('\u{1ee4}', ['\u{1ee5}', '\u{0}', '\u{0}']),
+ ('\u{1ee6}', ['\u{1ee7}', '\u{0}', '\u{0}']), ('\u{1ee8}', ['\u{1ee9}', '\u{0}', '\u{0}']),
+ ('\u{1eea}', ['\u{1eeb}', '\u{0}', '\u{0}']), ('\u{1eec}', ['\u{1eed}', '\u{0}', '\u{0}']),
+ ('\u{1eee}', ['\u{1eef}', '\u{0}', '\u{0}']), ('\u{1ef0}', ['\u{1ef1}', '\u{0}', '\u{0}']),
+ ('\u{1ef2}', ['\u{1ef3}', '\u{0}', '\u{0}']), ('\u{1ef4}', ['\u{1ef5}', '\u{0}', '\u{0}']),
+ ('\u{1ef6}', ['\u{1ef7}', '\u{0}', '\u{0}']), ('\u{1ef8}', ['\u{1ef9}', '\u{0}', '\u{0}']),
+ ('\u{1efa}', ['\u{1efb}', '\u{0}', '\u{0}']), ('\u{1efc}', ['\u{1efd}', '\u{0}', '\u{0}']),
+ ('\u{1efe}', ['\u{1eff}', '\u{0}', '\u{0}']), ('\u{1f08}', ['\u{1f00}', '\u{0}', '\u{0}']),
+ ('\u{1f09}', ['\u{1f01}', '\u{0}', '\u{0}']), ('\u{1f0a}', ['\u{1f02}', '\u{0}', '\u{0}']),
+ ('\u{1f0b}', ['\u{1f03}', '\u{0}', '\u{0}']), ('\u{1f0c}', ['\u{1f04}', '\u{0}', '\u{0}']),
+ ('\u{1f0d}', ['\u{1f05}', '\u{0}', '\u{0}']), ('\u{1f0e}', ['\u{1f06}', '\u{0}', '\u{0}']),
+ ('\u{1f0f}', ['\u{1f07}', '\u{0}', '\u{0}']), ('\u{1f18}', ['\u{1f10}', '\u{0}', '\u{0}']),
+ ('\u{1f19}', ['\u{1f11}', '\u{0}', '\u{0}']), ('\u{1f1a}', ['\u{1f12}', '\u{0}', '\u{0}']),
+ ('\u{1f1b}', ['\u{1f13}', '\u{0}', '\u{0}']), ('\u{1f1c}', ['\u{1f14}', '\u{0}', '\u{0}']),
+ ('\u{1f1d}', ['\u{1f15}', '\u{0}', '\u{0}']), ('\u{1f28}', ['\u{1f20}', '\u{0}', '\u{0}']),
+ ('\u{1f29}', ['\u{1f21}', '\u{0}', '\u{0}']), ('\u{1f2a}', ['\u{1f22}', '\u{0}', '\u{0}']),
+ ('\u{1f2b}', ['\u{1f23}', '\u{0}', '\u{0}']), ('\u{1f2c}', ['\u{1f24}', '\u{0}', '\u{0}']),
+ ('\u{1f2d}', ['\u{1f25}', '\u{0}', '\u{0}']), ('\u{1f2e}', ['\u{1f26}', '\u{0}', '\u{0}']),
+ ('\u{1f2f}', ['\u{1f27}', '\u{0}', '\u{0}']), ('\u{1f38}', ['\u{1f30}', '\u{0}', '\u{0}']),
+ ('\u{1f39}', ['\u{1f31}', '\u{0}', '\u{0}']), ('\u{1f3a}', ['\u{1f32}', '\u{0}', '\u{0}']),
+ ('\u{1f3b}', ['\u{1f33}', '\u{0}', '\u{0}']), ('\u{1f3c}', ['\u{1f34}', '\u{0}', '\u{0}']),
+ ('\u{1f3d}', ['\u{1f35}', '\u{0}', '\u{0}']), ('\u{1f3e}', ['\u{1f36}', '\u{0}', '\u{0}']),
+ ('\u{1f3f}', ['\u{1f37}', '\u{0}', '\u{0}']), ('\u{1f48}', ['\u{1f40}', '\u{0}', '\u{0}']),
+ ('\u{1f49}', ['\u{1f41}', '\u{0}', '\u{0}']), ('\u{1f4a}', ['\u{1f42}', '\u{0}', '\u{0}']),
+ ('\u{1f4b}', ['\u{1f43}', '\u{0}', '\u{0}']), ('\u{1f4c}', ['\u{1f44}', '\u{0}', '\u{0}']),
+ ('\u{1f4d}', ['\u{1f45}', '\u{0}', '\u{0}']), ('\u{1f59}', ['\u{1f51}', '\u{0}', '\u{0}']),
+ ('\u{1f5b}', ['\u{1f53}', '\u{0}', '\u{0}']), ('\u{1f5d}', ['\u{1f55}', '\u{0}', '\u{0}']),
+ ('\u{1f5f}', ['\u{1f57}', '\u{0}', '\u{0}']), ('\u{1f68}', ['\u{1f60}', '\u{0}', '\u{0}']),
+ ('\u{1f69}', ['\u{1f61}', '\u{0}', '\u{0}']), ('\u{1f6a}', ['\u{1f62}', '\u{0}', '\u{0}']),
+ ('\u{1f6b}', ['\u{1f63}', '\u{0}', '\u{0}']), ('\u{1f6c}', ['\u{1f64}', '\u{0}', '\u{0}']),
+ ('\u{1f6d}', ['\u{1f65}', '\u{0}', '\u{0}']), ('\u{1f6e}', ['\u{1f66}', '\u{0}', '\u{0}']),
+ ('\u{1f6f}', ['\u{1f67}', '\u{0}', '\u{0}']), ('\u{1f88}', ['\u{1f80}', '\u{0}', '\u{0}']),
+ ('\u{1f89}', ['\u{1f81}', '\u{0}', '\u{0}']), ('\u{1f8a}', ['\u{1f82}', '\u{0}', '\u{0}']),
+ ('\u{1f8b}', ['\u{1f83}', '\u{0}', '\u{0}']), ('\u{1f8c}', ['\u{1f84}', '\u{0}', '\u{0}']),
+ ('\u{1f8d}', ['\u{1f85}', '\u{0}', '\u{0}']), ('\u{1f8e}', ['\u{1f86}', '\u{0}', '\u{0}']),
+ ('\u{1f8f}', ['\u{1f87}', '\u{0}', '\u{0}']), ('\u{1f98}', ['\u{1f90}', '\u{0}', '\u{0}']),
+ ('\u{1f99}', ['\u{1f91}', '\u{0}', '\u{0}']), ('\u{1f9a}', ['\u{1f92}', '\u{0}', '\u{0}']),
+ ('\u{1f9b}', ['\u{1f93}', '\u{0}', '\u{0}']), ('\u{1f9c}', ['\u{1f94}', '\u{0}', '\u{0}']),
+ ('\u{1f9d}', ['\u{1f95}', '\u{0}', '\u{0}']), ('\u{1f9e}', ['\u{1f96}', '\u{0}', '\u{0}']),
+ ('\u{1f9f}', ['\u{1f97}', '\u{0}', '\u{0}']), ('\u{1fa8}', ['\u{1fa0}', '\u{0}', '\u{0}']),
+ ('\u{1fa9}', ['\u{1fa1}', '\u{0}', '\u{0}']), ('\u{1faa}', ['\u{1fa2}', '\u{0}', '\u{0}']),
+ ('\u{1fab}', ['\u{1fa3}', '\u{0}', '\u{0}']), ('\u{1fac}', ['\u{1fa4}', '\u{0}', '\u{0}']),
+ ('\u{1fad}', ['\u{1fa5}', '\u{0}', '\u{0}']), ('\u{1fae}', ['\u{1fa6}', '\u{0}', '\u{0}']),
+ ('\u{1faf}', ['\u{1fa7}', '\u{0}', '\u{0}']), ('\u{1fb8}', ['\u{1fb0}', '\u{0}', '\u{0}']),
+ ('\u{1fb9}', ['\u{1fb1}', '\u{0}', '\u{0}']), ('\u{1fba}', ['\u{1f70}', '\u{0}', '\u{0}']),
+ ('\u{1fbb}', ['\u{1f71}', '\u{0}', '\u{0}']), ('\u{1fbc}', ['\u{1fb3}', '\u{0}', '\u{0}']),
+ ('\u{1fc8}', ['\u{1f72}', '\u{0}', '\u{0}']), ('\u{1fc9}', ['\u{1f73}', '\u{0}', '\u{0}']),
+ ('\u{1fca}', ['\u{1f74}', '\u{0}', '\u{0}']), ('\u{1fcb}', ['\u{1f75}', '\u{0}', '\u{0}']),
+ ('\u{1fcc}', ['\u{1fc3}', '\u{0}', '\u{0}']), ('\u{1fd8}', ['\u{1fd0}', '\u{0}', '\u{0}']),
+ ('\u{1fd9}', ['\u{1fd1}', '\u{0}', '\u{0}']), ('\u{1fda}', ['\u{1f76}', '\u{0}', '\u{0}']),
+ ('\u{1fdb}', ['\u{1f77}', '\u{0}', '\u{0}']), ('\u{1fe8}', ['\u{1fe0}', '\u{0}', '\u{0}']),
+ ('\u{1fe9}', ['\u{1fe1}', '\u{0}', '\u{0}']), ('\u{1fea}', ['\u{1f7a}', '\u{0}', '\u{0}']),
+ ('\u{1feb}', ['\u{1f7b}', '\u{0}', '\u{0}']), ('\u{1fec}', ['\u{1fe5}', '\u{0}', '\u{0}']),
+ ('\u{1ff8}', ['\u{1f78}', '\u{0}', '\u{0}']), ('\u{1ff9}', ['\u{1f79}', '\u{0}', '\u{0}']),
+ ('\u{1ffa}', ['\u{1f7c}', '\u{0}', '\u{0}']), ('\u{1ffb}', ['\u{1f7d}', '\u{0}', '\u{0}']),
+ ('\u{1ffc}', ['\u{1ff3}', '\u{0}', '\u{0}']), ('\u{2126}', ['\u{3c9}', '\u{0}', '\u{0}']),
+ ('\u{212a}', ['k', '\u{0}', '\u{0}']), ('\u{212b}', ['\u{e5}', '\u{0}', '\u{0}']),
+ ('\u{2132}', ['\u{214e}', '\u{0}', '\u{0}']), ('\u{2160}', ['\u{2170}', '\u{0}', '\u{0}']),
+ ('\u{2161}', ['\u{2171}', '\u{0}', '\u{0}']), ('\u{2162}', ['\u{2172}', '\u{0}', '\u{0}']),
+ ('\u{2163}', ['\u{2173}', '\u{0}', '\u{0}']), ('\u{2164}', ['\u{2174}', '\u{0}', '\u{0}']),
+ ('\u{2165}', ['\u{2175}', '\u{0}', '\u{0}']), ('\u{2166}', ['\u{2176}', '\u{0}', '\u{0}']),
+ ('\u{2167}', ['\u{2177}', '\u{0}', '\u{0}']), ('\u{2168}', ['\u{2178}', '\u{0}', '\u{0}']),
+ ('\u{2169}', ['\u{2179}', '\u{0}', '\u{0}']), ('\u{216a}', ['\u{217a}', '\u{0}', '\u{0}']),
+ ('\u{216b}', ['\u{217b}', '\u{0}', '\u{0}']), ('\u{216c}', ['\u{217c}', '\u{0}', '\u{0}']),
+ ('\u{216d}', ['\u{217d}', '\u{0}', '\u{0}']), ('\u{216e}', ['\u{217e}', '\u{0}', '\u{0}']),
+ ('\u{216f}', ['\u{217f}', '\u{0}', '\u{0}']), ('\u{2183}', ['\u{2184}', '\u{0}', '\u{0}']),
+ ('\u{24b6}', ['\u{24d0}', '\u{0}', '\u{0}']), ('\u{24b7}', ['\u{24d1}', '\u{0}', '\u{0}']),
+ ('\u{24b8}', ['\u{24d2}', '\u{0}', '\u{0}']), ('\u{24b9}', ['\u{24d3}', '\u{0}', '\u{0}']),
+ ('\u{24ba}', ['\u{24d4}', '\u{0}', '\u{0}']), ('\u{24bb}', ['\u{24d5}', '\u{0}', '\u{0}']),
+ ('\u{24bc}', ['\u{24d6}', '\u{0}', '\u{0}']), ('\u{24bd}', ['\u{24d7}', '\u{0}', '\u{0}']),
+ ('\u{24be}', ['\u{24d8}', '\u{0}', '\u{0}']), ('\u{24bf}', ['\u{24d9}', '\u{0}', '\u{0}']),
+ ('\u{24c0}', ['\u{24da}', '\u{0}', '\u{0}']), ('\u{24c1}', ['\u{24db}', '\u{0}', '\u{0}']),
+ ('\u{24c2}', ['\u{24dc}', '\u{0}', '\u{0}']), ('\u{24c3}', ['\u{24dd}', '\u{0}', '\u{0}']),
+ ('\u{24c4}', ['\u{24de}', '\u{0}', '\u{0}']), ('\u{24c5}', ['\u{24df}', '\u{0}', '\u{0}']),
+ ('\u{24c6}', ['\u{24e0}', '\u{0}', '\u{0}']), ('\u{24c7}', ['\u{24e1}', '\u{0}', '\u{0}']),
+ ('\u{24c8}', ['\u{24e2}', '\u{0}', '\u{0}']), ('\u{24c9}', ['\u{24e3}', '\u{0}', '\u{0}']),
+ ('\u{24ca}', ['\u{24e4}', '\u{0}', '\u{0}']), ('\u{24cb}', ['\u{24e5}', '\u{0}', '\u{0}']),
+ ('\u{24cc}', ['\u{24e6}', '\u{0}', '\u{0}']), ('\u{24cd}', ['\u{24e7}', '\u{0}', '\u{0}']),
+ ('\u{24ce}', ['\u{24e8}', '\u{0}', '\u{0}']), ('\u{24cf}', ['\u{24e9}', '\u{0}', '\u{0}']),
+ ('\u{2c00}', ['\u{2c30}', '\u{0}', '\u{0}']), ('\u{2c01}', ['\u{2c31}', '\u{0}', '\u{0}']),
+ ('\u{2c02}', ['\u{2c32}', '\u{0}', '\u{0}']), ('\u{2c03}', ['\u{2c33}', '\u{0}', '\u{0}']),
+ ('\u{2c04}', ['\u{2c34}', '\u{0}', '\u{0}']), ('\u{2c05}', ['\u{2c35}', '\u{0}', '\u{0}']),
+ ('\u{2c06}', ['\u{2c36}', '\u{0}', '\u{0}']), ('\u{2c07}', ['\u{2c37}', '\u{0}', '\u{0}']),
+ ('\u{2c08}', ['\u{2c38}', '\u{0}', '\u{0}']), ('\u{2c09}', ['\u{2c39}', '\u{0}', '\u{0}']),
+ ('\u{2c0a}', ['\u{2c3a}', '\u{0}', '\u{0}']), ('\u{2c0b}', ['\u{2c3b}', '\u{0}', '\u{0}']),
+ ('\u{2c0c}', ['\u{2c3c}', '\u{0}', '\u{0}']), ('\u{2c0d}', ['\u{2c3d}', '\u{0}', '\u{0}']),
+ ('\u{2c0e}', ['\u{2c3e}', '\u{0}', '\u{0}']), ('\u{2c0f}', ['\u{2c3f}', '\u{0}', '\u{0}']),
+ ('\u{2c10}', ['\u{2c40}', '\u{0}', '\u{0}']), ('\u{2c11}', ['\u{2c41}', '\u{0}', '\u{0}']),
+ ('\u{2c12}', ['\u{2c42}', '\u{0}', '\u{0}']), ('\u{2c13}', ['\u{2c43}', '\u{0}', '\u{0}']),
+ ('\u{2c14}', ['\u{2c44}', '\u{0}', '\u{0}']), ('\u{2c15}', ['\u{2c45}', '\u{0}', '\u{0}']),
+ ('\u{2c16}', ['\u{2c46}', '\u{0}', '\u{0}']), ('\u{2c17}', ['\u{2c47}', '\u{0}', '\u{0}']),
+ ('\u{2c18}', ['\u{2c48}', '\u{0}', '\u{0}']), ('\u{2c19}', ['\u{2c49}', '\u{0}', '\u{0}']),
+ ('\u{2c1a}', ['\u{2c4a}', '\u{0}', '\u{0}']), ('\u{2c1b}', ['\u{2c4b}', '\u{0}', '\u{0}']),
+ ('\u{2c1c}', ['\u{2c4c}', '\u{0}', '\u{0}']), ('\u{2c1d}', ['\u{2c4d}', '\u{0}', '\u{0}']),
+ ('\u{2c1e}', ['\u{2c4e}', '\u{0}', '\u{0}']), ('\u{2c1f}', ['\u{2c4f}', '\u{0}', '\u{0}']),
+ ('\u{2c20}', ['\u{2c50}', '\u{0}', '\u{0}']), ('\u{2c21}', ['\u{2c51}', '\u{0}', '\u{0}']),
+ ('\u{2c22}', ['\u{2c52}', '\u{0}', '\u{0}']), ('\u{2c23}', ['\u{2c53}', '\u{0}', '\u{0}']),
+ ('\u{2c24}', ['\u{2c54}', '\u{0}', '\u{0}']), ('\u{2c25}', ['\u{2c55}', '\u{0}', '\u{0}']),
+ ('\u{2c26}', ['\u{2c56}', '\u{0}', '\u{0}']), ('\u{2c27}', ['\u{2c57}', '\u{0}', '\u{0}']),
+ ('\u{2c28}', ['\u{2c58}', '\u{0}', '\u{0}']), ('\u{2c29}', ['\u{2c59}', '\u{0}', '\u{0}']),
+ ('\u{2c2a}', ['\u{2c5a}', '\u{0}', '\u{0}']), ('\u{2c2b}', ['\u{2c5b}', '\u{0}', '\u{0}']),
+ ('\u{2c2c}', ['\u{2c5c}', '\u{0}', '\u{0}']), ('\u{2c2d}', ['\u{2c5d}', '\u{0}', '\u{0}']),
+ ('\u{2c2e}', ['\u{2c5e}', '\u{0}', '\u{0}']), ('\u{2c60}', ['\u{2c61}', '\u{0}', '\u{0}']),
+ ('\u{2c62}', ['\u{26b}', '\u{0}', '\u{0}']), ('\u{2c63}', ['\u{1d7d}', '\u{0}', '\u{0}']),
+ ('\u{2c64}', ['\u{27d}', '\u{0}', '\u{0}']), ('\u{2c67}', ['\u{2c68}', '\u{0}', '\u{0}']),
+ ('\u{2c69}', ['\u{2c6a}', '\u{0}', '\u{0}']), ('\u{2c6b}', ['\u{2c6c}', '\u{0}', '\u{0}']),
+ ('\u{2c6d}', ['\u{251}', '\u{0}', '\u{0}']), ('\u{2c6e}', ['\u{271}', '\u{0}', '\u{0}']),
+ ('\u{2c6f}', ['\u{250}', '\u{0}', '\u{0}']), ('\u{2c70}', ['\u{252}', '\u{0}', '\u{0}']),
+ ('\u{2c72}', ['\u{2c73}', '\u{0}', '\u{0}']), ('\u{2c75}', ['\u{2c76}', '\u{0}', '\u{0}']),
+ ('\u{2c7e}', ['\u{23f}', '\u{0}', '\u{0}']), ('\u{2c7f}', ['\u{240}', '\u{0}', '\u{0}']),
+ ('\u{2c80}', ['\u{2c81}', '\u{0}', '\u{0}']), ('\u{2c82}', ['\u{2c83}', '\u{0}', '\u{0}']),
+ ('\u{2c84}', ['\u{2c85}', '\u{0}', '\u{0}']), ('\u{2c86}', ['\u{2c87}', '\u{0}', '\u{0}']),
+ ('\u{2c88}', ['\u{2c89}', '\u{0}', '\u{0}']), ('\u{2c8a}', ['\u{2c8b}', '\u{0}', '\u{0}']),
+ ('\u{2c8c}', ['\u{2c8d}', '\u{0}', '\u{0}']), ('\u{2c8e}', ['\u{2c8f}', '\u{0}', '\u{0}']),
+ ('\u{2c90}', ['\u{2c91}', '\u{0}', '\u{0}']), ('\u{2c92}', ['\u{2c93}', '\u{0}', '\u{0}']),
+ ('\u{2c94}', ['\u{2c95}', '\u{0}', '\u{0}']), ('\u{2c96}', ['\u{2c97}', '\u{0}', '\u{0}']),
+ ('\u{2c98}', ['\u{2c99}', '\u{0}', '\u{0}']), ('\u{2c9a}', ['\u{2c9b}', '\u{0}', '\u{0}']),
+ ('\u{2c9c}', ['\u{2c9d}', '\u{0}', '\u{0}']), ('\u{2c9e}', ['\u{2c9f}', '\u{0}', '\u{0}']),
+ ('\u{2ca0}', ['\u{2ca1}', '\u{0}', '\u{0}']), ('\u{2ca2}', ['\u{2ca3}', '\u{0}', '\u{0}']),
+ ('\u{2ca4}', ['\u{2ca5}', '\u{0}', '\u{0}']), ('\u{2ca6}', ['\u{2ca7}', '\u{0}', '\u{0}']),
+ ('\u{2ca8}', ['\u{2ca9}', '\u{0}', '\u{0}']), ('\u{2caa}', ['\u{2cab}', '\u{0}', '\u{0}']),
+ ('\u{2cac}', ['\u{2cad}', '\u{0}', '\u{0}']), ('\u{2cae}', ['\u{2caf}', '\u{0}', '\u{0}']),
+ ('\u{2cb0}', ['\u{2cb1}', '\u{0}', '\u{0}']), ('\u{2cb2}', ['\u{2cb3}', '\u{0}', '\u{0}']),
+ ('\u{2cb4}', ['\u{2cb5}', '\u{0}', '\u{0}']), ('\u{2cb6}', ['\u{2cb7}', '\u{0}', '\u{0}']),
+ ('\u{2cb8}', ['\u{2cb9}', '\u{0}', '\u{0}']), ('\u{2cba}', ['\u{2cbb}', '\u{0}', '\u{0}']),
+ ('\u{2cbc}', ['\u{2cbd}', '\u{0}', '\u{0}']), ('\u{2cbe}', ['\u{2cbf}', '\u{0}', '\u{0}']),
+ ('\u{2cc0}', ['\u{2cc1}', '\u{0}', '\u{0}']), ('\u{2cc2}', ['\u{2cc3}', '\u{0}', '\u{0}']),
+ ('\u{2cc4}', ['\u{2cc5}', '\u{0}', '\u{0}']), ('\u{2cc6}', ['\u{2cc7}', '\u{0}', '\u{0}']),
+ ('\u{2cc8}', ['\u{2cc9}', '\u{0}', '\u{0}']), ('\u{2cca}', ['\u{2ccb}', '\u{0}', '\u{0}']),
+ ('\u{2ccc}', ['\u{2ccd}', '\u{0}', '\u{0}']), ('\u{2cce}', ['\u{2ccf}', '\u{0}', '\u{0}']),
+ ('\u{2cd0}', ['\u{2cd1}', '\u{0}', '\u{0}']), ('\u{2cd2}', ['\u{2cd3}', '\u{0}', '\u{0}']),
+ ('\u{2cd4}', ['\u{2cd5}', '\u{0}', '\u{0}']), ('\u{2cd6}', ['\u{2cd7}', '\u{0}', '\u{0}']),
+ ('\u{2cd8}', ['\u{2cd9}', '\u{0}', '\u{0}']), ('\u{2cda}', ['\u{2cdb}', '\u{0}', '\u{0}']),
+ ('\u{2cdc}', ['\u{2cdd}', '\u{0}', '\u{0}']), ('\u{2cde}', ['\u{2cdf}', '\u{0}', '\u{0}']),
+ ('\u{2ce0}', ['\u{2ce1}', '\u{0}', '\u{0}']), ('\u{2ce2}', ['\u{2ce3}', '\u{0}', '\u{0}']),
+ ('\u{2ceb}', ['\u{2cec}', '\u{0}', '\u{0}']), ('\u{2ced}', ['\u{2cee}', '\u{0}', '\u{0}']),
+ ('\u{2cf2}', ['\u{2cf3}', '\u{0}', '\u{0}']), ('\u{a640}', ['\u{a641}', '\u{0}', '\u{0}']),
+ ('\u{a642}', ['\u{a643}', '\u{0}', '\u{0}']), ('\u{a644}', ['\u{a645}', '\u{0}', '\u{0}']),
+ ('\u{a646}', ['\u{a647}', '\u{0}', '\u{0}']), ('\u{a648}', ['\u{a649}', '\u{0}', '\u{0}']),
+ ('\u{a64a}', ['\u{a64b}', '\u{0}', '\u{0}']), ('\u{a64c}', ['\u{a64d}', '\u{0}', '\u{0}']),
+ ('\u{a64e}', ['\u{a64f}', '\u{0}', '\u{0}']), ('\u{a650}', ['\u{a651}', '\u{0}', '\u{0}']),
+ ('\u{a652}', ['\u{a653}', '\u{0}', '\u{0}']), ('\u{a654}', ['\u{a655}', '\u{0}', '\u{0}']),
+ ('\u{a656}', ['\u{a657}', '\u{0}', '\u{0}']), ('\u{a658}', ['\u{a659}', '\u{0}', '\u{0}']),
+ ('\u{a65a}', ['\u{a65b}', '\u{0}', '\u{0}']), ('\u{a65c}', ['\u{a65d}', '\u{0}', '\u{0}']),
+ ('\u{a65e}', ['\u{a65f}', '\u{0}', '\u{0}']), ('\u{a660}', ['\u{a661}', '\u{0}', '\u{0}']),
+ ('\u{a662}', ['\u{a663}', '\u{0}', '\u{0}']), ('\u{a664}', ['\u{a665}', '\u{0}', '\u{0}']),
+ ('\u{a666}', ['\u{a667}', '\u{0}', '\u{0}']), ('\u{a668}', ['\u{a669}', '\u{0}', '\u{0}']),
+ ('\u{a66a}', ['\u{a66b}', '\u{0}', '\u{0}']), ('\u{a66c}', ['\u{a66d}', '\u{0}', '\u{0}']),
+ ('\u{a680}', ['\u{a681}', '\u{0}', '\u{0}']), ('\u{a682}', ['\u{a683}', '\u{0}', '\u{0}']),
+ ('\u{a684}', ['\u{a685}', '\u{0}', '\u{0}']), ('\u{a686}', ['\u{a687}', '\u{0}', '\u{0}']),
+ ('\u{a688}', ['\u{a689}', '\u{0}', '\u{0}']), ('\u{a68a}', ['\u{a68b}', '\u{0}', '\u{0}']),
+ ('\u{a68c}', ['\u{a68d}', '\u{0}', '\u{0}']), ('\u{a68e}', ['\u{a68f}', '\u{0}', '\u{0}']),
+ ('\u{a690}', ['\u{a691}', '\u{0}', '\u{0}']), ('\u{a692}', ['\u{a693}', '\u{0}', '\u{0}']),
+ ('\u{a694}', ['\u{a695}', '\u{0}', '\u{0}']), ('\u{a696}', ['\u{a697}', '\u{0}', '\u{0}']),
+ ('\u{a698}', ['\u{a699}', '\u{0}', '\u{0}']), ('\u{a69a}', ['\u{a69b}', '\u{0}', '\u{0}']),
+ ('\u{a722}', ['\u{a723}', '\u{0}', '\u{0}']), ('\u{a724}', ['\u{a725}', '\u{0}', '\u{0}']),
+ ('\u{a726}', ['\u{a727}', '\u{0}', '\u{0}']), ('\u{a728}', ['\u{a729}', '\u{0}', '\u{0}']),
+ ('\u{a72a}', ['\u{a72b}', '\u{0}', '\u{0}']), ('\u{a72c}', ['\u{a72d}', '\u{0}', '\u{0}']),
+ ('\u{a72e}', ['\u{a72f}', '\u{0}', '\u{0}']), ('\u{a732}', ['\u{a733}', '\u{0}', '\u{0}']),
+ ('\u{a734}', ['\u{a735}', '\u{0}', '\u{0}']), ('\u{a736}', ['\u{a737}', '\u{0}', '\u{0}']),
+ ('\u{a738}', ['\u{a739}', '\u{0}', '\u{0}']), ('\u{a73a}', ['\u{a73b}', '\u{0}', '\u{0}']),
+ ('\u{a73c}', ['\u{a73d}', '\u{0}', '\u{0}']), ('\u{a73e}', ['\u{a73f}', '\u{0}', '\u{0}']),
+ ('\u{a740}', ['\u{a741}', '\u{0}', '\u{0}']), ('\u{a742}', ['\u{a743}', '\u{0}', '\u{0}']),
+ ('\u{a744}', ['\u{a745}', '\u{0}', '\u{0}']), ('\u{a746}', ['\u{a747}', '\u{0}', '\u{0}']),
+ ('\u{a748}', ['\u{a749}', '\u{0}', '\u{0}']), ('\u{a74a}', ['\u{a74b}', '\u{0}', '\u{0}']),
+ ('\u{a74c}', ['\u{a74d}', '\u{0}', '\u{0}']), ('\u{a74e}', ['\u{a74f}', '\u{0}', '\u{0}']),
+ ('\u{a750}', ['\u{a751}', '\u{0}', '\u{0}']), ('\u{a752}', ['\u{a753}', '\u{0}', '\u{0}']),
+ ('\u{a754}', ['\u{a755}', '\u{0}', '\u{0}']), ('\u{a756}', ['\u{a757}', '\u{0}', '\u{0}']),
+ ('\u{a758}', ['\u{a759}', '\u{0}', '\u{0}']), ('\u{a75a}', ['\u{a75b}', '\u{0}', '\u{0}']),
+ ('\u{a75c}', ['\u{a75d}', '\u{0}', '\u{0}']), ('\u{a75e}', ['\u{a75f}', '\u{0}', '\u{0}']),
+ ('\u{a760}', ['\u{a761}', '\u{0}', '\u{0}']), ('\u{a762}', ['\u{a763}', '\u{0}', '\u{0}']),
+ ('\u{a764}', ['\u{a765}', '\u{0}', '\u{0}']), ('\u{a766}', ['\u{a767}', '\u{0}', '\u{0}']),
+ ('\u{a768}', ['\u{a769}', '\u{0}', '\u{0}']), ('\u{a76a}', ['\u{a76b}', '\u{0}', '\u{0}']),
+ ('\u{a76c}', ['\u{a76d}', '\u{0}', '\u{0}']), ('\u{a76e}', ['\u{a76f}', '\u{0}', '\u{0}']),
+ ('\u{a779}', ['\u{a77a}', '\u{0}', '\u{0}']), ('\u{a77b}', ['\u{a77c}', '\u{0}', '\u{0}']),
+ ('\u{a77d}', ['\u{1d79}', '\u{0}', '\u{0}']), ('\u{a77e}', ['\u{a77f}', '\u{0}', '\u{0}']),
+ ('\u{a780}', ['\u{a781}', '\u{0}', '\u{0}']), ('\u{a782}', ['\u{a783}', '\u{0}', '\u{0}']),
+ ('\u{a784}', ['\u{a785}', '\u{0}', '\u{0}']), ('\u{a786}', ['\u{a787}', '\u{0}', '\u{0}']),
+ ('\u{a78b}', ['\u{a78c}', '\u{0}', '\u{0}']), ('\u{a78d}', ['\u{265}', '\u{0}', '\u{0}']),
+ ('\u{a790}', ['\u{a791}', '\u{0}', '\u{0}']), ('\u{a792}', ['\u{a793}', '\u{0}', '\u{0}']),
+ ('\u{a796}', ['\u{a797}', '\u{0}', '\u{0}']), ('\u{a798}', ['\u{a799}', '\u{0}', '\u{0}']),
+ ('\u{a79a}', ['\u{a79b}', '\u{0}', '\u{0}']), ('\u{a79c}', ['\u{a79d}', '\u{0}', '\u{0}']),
+ ('\u{a79e}', ['\u{a79f}', '\u{0}', '\u{0}']), ('\u{a7a0}', ['\u{a7a1}', '\u{0}', '\u{0}']),
+ ('\u{a7a2}', ['\u{a7a3}', '\u{0}', '\u{0}']), ('\u{a7a4}', ['\u{a7a5}', '\u{0}', '\u{0}']),
+ ('\u{a7a6}', ['\u{a7a7}', '\u{0}', '\u{0}']), ('\u{a7a8}', ['\u{a7a9}', '\u{0}', '\u{0}']),
+ ('\u{a7aa}', ['\u{266}', '\u{0}', '\u{0}']), ('\u{a7ab}', ['\u{25c}', '\u{0}', '\u{0}']),
+ ('\u{a7ac}', ['\u{261}', '\u{0}', '\u{0}']), ('\u{a7ad}', ['\u{26c}', '\u{0}', '\u{0}']),
+ ('\u{a7ae}', ['\u{26a}', '\u{0}', '\u{0}']), ('\u{a7b0}', ['\u{29e}', '\u{0}', '\u{0}']),
+ ('\u{a7b1}', ['\u{287}', '\u{0}', '\u{0}']), ('\u{a7b2}', ['\u{29d}', '\u{0}', '\u{0}']),
+ ('\u{a7b3}', ['\u{ab53}', '\u{0}', '\u{0}']), ('\u{a7b4}', ['\u{a7b5}', '\u{0}', '\u{0}']),
+ ('\u{a7b6}', ['\u{a7b7}', '\u{0}', '\u{0}']), ('\u{a7b8}', ['\u{a7b9}', '\u{0}', '\u{0}']),
+ ('\u{a7ba}', ['\u{a7bb}', '\u{0}', '\u{0}']), ('\u{a7bc}', ['\u{a7bd}', '\u{0}', '\u{0}']),
+ ('\u{a7be}', ['\u{a7bf}', '\u{0}', '\u{0}']), ('\u{a7c2}', ['\u{a7c3}', '\u{0}', '\u{0}']),
+ ('\u{a7c4}', ['\u{a794}', '\u{0}', '\u{0}']), ('\u{a7c5}', ['\u{282}', '\u{0}', '\u{0}']),
+ ('\u{a7c6}', ['\u{1d8e}', '\u{0}', '\u{0}']), ('\u{a7c7}', ['\u{a7c8}', '\u{0}', '\u{0}']),
+ ('\u{a7c9}', ['\u{a7ca}', '\u{0}', '\u{0}']), ('\u{a7f5}', ['\u{a7f6}', '\u{0}', '\u{0}']),
+ ('\u{ff21}', ['\u{ff41}', '\u{0}', '\u{0}']), ('\u{ff22}', ['\u{ff42}', '\u{0}', '\u{0}']),
+ ('\u{ff23}', ['\u{ff43}', '\u{0}', '\u{0}']), ('\u{ff24}', ['\u{ff44}', '\u{0}', '\u{0}']),
+ ('\u{ff25}', ['\u{ff45}', '\u{0}', '\u{0}']), ('\u{ff26}', ['\u{ff46}', '\u{0}', '\u{0}']),
+ ('\u{ff27}', ['\u{ff47}', '\u{0}', '\u{0}']), ('\u{ff28}', ['\u{ff48}', '\u{0}', '\u{0}']),
+ ('\u{ff29}', ['\u{ff49}', '\u{0}', '\u{0}']), ('\u{ff2a}', ['\u{ff4a}', '\u{0}', '\u{0}']),
+ ('\u{ff2b}', ['\u{ff4b}', '\u{0}', '\u{0}']), ('\u{ff2c}', ['\u{ff4c}', '\u{0}', '\u{0}']),
+ ('\u{ff2d}', ['\u{ff4d}', '\u{0}', '\u{0}']), ('\u{ff2e}', ['\u{ff4e}', '\u{0}', '\u{0}']),
+ ('\u{ff2f}', ['\u{ff4f}', '\u{0}', '\u{0}']), ('\u{ff30}', ['\u{ff50}', '\u{0}', '\u{0}']),
+ ('\u{ff31}', ['\u{ff51}', '\u{0}', '\u{0}']), ('\u{ff32}', ['\u{ff52}', '\u{0}', '\u{0}']),
+ ('\u{ff33}', ['\u{ff53}', '\u{0}', '\u{0}']), ('\u{ff34}', ['\u{ff54}', '\u{0}', '\u{0}']),
+ ('\u{ff35}', ['\u{ff55}', '\u{0}', '\u{0}']), ('\u{ff36}', ['\u{ff56}', '\u{0}', '\u{0}']),
+ ('\u{ff37}', ['\u{ff57}', '\u{0}', '\u{0}']), ('\u{ff38}', ['\u{ff58}', '\u{0}', '\u{0}']),
+ ('\u{ff39}', ['\u{ff59}', '\u{0}', '\u{0}']), ('\u{ff3a}', ['\u{ff5a}', '\u{0}', '\u{0}']),
+ ('\u{10400}', ['\u{10428}', '\u{0}', '\u{0}']),
+ ('\u{10401}', ['\u{10429}', '\u{0}', '\u{0}']),
+ ('\u{10402}', ['\u{1042a}', '\u{0}', '\u{0}']),
+ ('\u{10403}', ['\u{1042b}', '\u{0}', '\u{0}']),
+ ('\u{10404}', ['\u{1042c}', '\u{0}', '\u{0}']),
+ ('\u{10405}', ['\u{1042d}', '\u{0}', '\u{0}']),
+ ('\u{10406}', ['\u{1042e}', '\u{0}', '\u{0}']),
+ ('\u{10407}', ['\u{1042f}', '\u{0}', '\u{0}']),
+ ('\u{10408}', ['\u{10430}', '\u{0}', '\u{0}']),
+ ('\u{10409}', ['\u{10431}', '\u{0}', '\u{0}']),
+ ('\u{1040a}', ['\u{10432}', '\u{0}', '\u{0}']),
+ ('\u{1040b}', ['\u{10433}', '\u{0}', '\u{0}']),
+ ('\u{1040c}', ['\u{10434}', '\u{0}', '\u{0}']),
+ ('\u{1040d}', ['\u{10435}', '\u{0}', '\u{0}']),
+ ('\u{1040e}', ['\u{10436}', '\u{0}', '\u{0}']),
+ ('\u{1040f}', ['\u{10437}', '\u{0}', '\u{0}']),
+ ('\u{10410}', ['\u{10438}', '\u{0}', '\u{0}']),
+ ('\u{10411}', ['\u{10439}', '\u{0}', '\u{0}']),
+ ('\u{10412}', ['\u{1043a}', '\u{0}', '\u{0}']),
+ ('\u{10413}', ['\u{1043b}', '\u{0}', '\u{0}']),
+ ('\u{10414}', ['\u{1043c}', '\u{0}', '\u{0}']),
+ ('\u{10415}', ['\u{1043d}', '\u{0}', '\u{0}']),
+ ('\u{10416}', ['\u{1043e}', '\u{0}', '\u{0}']),
+ ('\u{10417}', ['\u{1043f}', '\u{0}', '\u{0}']),
+ ('\u{10418}', ['\u{10440}', '\u{0}', '\u{0}']),
+ ('\u{10419}', ['\u{10441}', '\u{0}', '\u{0}']),
+ ('\u{1041a}', ['\u{10442}', '\u{0}', '\u{0}']),
+ ('\u{1041b}', ['\u{10443}', '\u{0}', '\u{0}']),
+ ('\u{1041c}', ['\u{10444}', '\u{0}', '\u{0}']),
+ ('\u{1041d}', ['\u{10445}', '\u{0}', '\u{0}']),
+ ('\u{1041e}', ['\u{10446}', '\u{0}', '\u{0}']),
+ ('\u{1041f}', ['\u{10447}', '\u{0}', '\u{0}']),
+ ('\u{10420}', ['\u{10448}', '\u{0}', '\u{0}']),
+ ('\u{10421}', ['\u{10449}', '\u{0}', '\u{0}']),
+ ('\u{10422}', ['\u{1044a}', '\u{0}', '\u{0}']),
+ ('\u{10423}', ['\u{1044b}', '\u{0}', '\u{0}']),
+ ('\u{10424}', ['\u{1044c}', '\u{0}', '\u{0}']),
+ ('\u{10425}', ['\u{1044d}', '\u{0}', '\u{0}']),
+ ('\u{10426}', ['\u{1044e}', '\u{0}', '\u{0}']),
+ ('\u{10427}', ['\u{1044f}', '\u{0}', '\u{0}']),
+ ('\u{104b0}', ['\u{104d8}', '\u{0}', '\u{0}']),
+ ('\u{104b1}', ['\u{104d9}', '\u{0}', '\u{0}']),
+ ('\u{104b2}', ['\u{104da}', '\u{0}', '\u{0}']),
+ ('\u{104b3}', ['\u{104db}', '\u{0}', '\u{0}']),
+ ('\u{104b4}', ['\u{104dc}', '\u{0}', '\u{0}']),
+ ('\u{104b5}', ['\u{104dd}', '\u{0}', '\u{0}']),
+ ('\u{104b6}', ['\u{104de}', '\u{0}', '\u{0}']),
+ ('\u{104b7}', ['\u{104df}', '\u{0}', '\u{0}']),
+ ('\u{104b8}', ['\u{104e0}', '\u{0}', '\u{0}']),
+ ('\u{104b9}', ['\u{104e1}', '\u{0}', '\u{0}']),
+ ('\u{104ba}', ['\u{104e2}', '\u{0}', '\u{0}']),
+ ('\u{104bb}', ['\u{104e3}', '\u{0}', '\u{0}']),
+ ('\u{104bc}', ['\u{104e4}', '\u{0}', '\u{0}']),
+ ('\u{104bd}', ['\u{104e5}', '\u{0}', '\u{0}']),
+ ('\u{104be}', ['\u{104e6}', '\u{0}', '\u{0}']),
+ ('\u{104bf}', ['\u{104e7}', '\u{0}', '\u{0}']),
+ ('\u{104c0}', ['\u{104e8}', '\u{0}', '\u{0}']),
+ ('\u{104c1}', ['\u{104e9}', '\u{0}', '\u{0}']),
+ ('\u{104c2}', ['\u{104ea}', '\u{0}', '\u{0}']),
+ ('\u{104c3}', ['\u{104eb}', '\u{0}', '\u{0}']),
+ ('\u{104c4}', ['\u{104ec}', '\u{0}', '\u{0}']),
+ ('\u{104c5}', ['\u{104ed}', '\u{0}', '\u{0}']),
+ ('\u{104c6}', ['\u{104ee}', '\u{0}', '\u{0}']),
+ ('\u{104c7}', ['\u{104ef}', '\u{0}', '\u{0}']),
+ ('\u{104c8}', ['\u{104f0}', '\u{0}', '\u{0}']),
+ ('\u{104c9}', ['\u{104f1}', '\u{0}', '\u{0}']),
+ ('\u{104ca}', ['\u{104f2}', '\u{0}', '\u{0}']),
+ ('\u{104cb}', ['\u{104f3}', '\u{0}', '\u{0}']),
+ ('\u{104cc}', ['\u{104f4}', '\u{0}', '\u{0}']),
+ ('\u{104cd}', ['\u{104f5}', '\u{0}', '\u{0}']),
+ ('\u{104ce}', ['\u{104f6}', '\u{0}', '\u{0}']),
+ ('\u{104cf}', ['\u{104f7}', '\u{0}', '\u{0}']),
+ ('\u{104d0}', ['\u{104f8}', '\u{0}', '\u{0}']),
+ ('\u{104d1}', ['\u{104f9}', '\u{0}', '\u{0}']),
+ ('\u{104d2}', ['\u{104fa}', '\u{0}', '\u{0}']),
+ ('\u{104d3}', ['\u{104fb}', '\u{0}', '\u{0}']),
+ ('\u{10c80}', ['\u{10cc0}', '\u{0}', '\u{0}']),
+ ('\u{10c81}', ['\u{10cc1}', '\u{0}', '\u{0}']),
+ ('\u{10c82}', ['\u{10cc2}', '\u{0}', '\u{0}']),
+ ('\u{10c83}', ['\u{10cc3}', '\u{0}', '\u{0}']),
+ ('\u{10c84}', ['\u{10cc4}', '\u{0}', '\u{0}']),
+ ('\u{10c85}', ['\u{10cc5}', '\u{0}', '\u{0}']),
+ ('\u{10c86}', ['\u{10cc6}', '\u{0}', '\u{0}']),
+ ('\u{10c87}', ['\u{10cc7}', '\u{0}', '\u{0}']),
+ ('\u{10c88}', ['\u{10cc8}', '\u{0}', '\u{0}']),
+ ('\u{10c89}', ['\u{10cc9}', '\u{0}', '\u{0}']),
+ ('\u{10c8a}', ['\u{10cca}', '\u{0}', '\u{0}']),
+ ('\u{10c8b}', ['\u{10ccb}', '\u{0}', '\u{0}']),
+ ('\u{10c8c}', ['\u{10ccc}', '\u{0}', '\u{0}']),
+ ('\u{10c8d}', ['\u{10ccd}', '\u{0}', '\u{0}']),
+ ('\u{10c8e}', ['\u{10cce}', '\u{0}', '\u{0}']),
+ ('\u{10c8f}', ['\u{10ccf}', '\u{0}', '\u{0}']),
+ ('\u{10c90}', ['\u{10cd0}', '\u{0}', '\u{0}']),
+ ('\u{10c91}', ['\u{10cd1}', '\u{0}', '\u{0}']),
+ ('\u{10c92}', ['\u{10cd2}', '\u{0}', '\u{0}']),
+ ('\u{10c93}', ['\u{10cd3}', '\u{0}', '\u{0}']),
+ ('\u{10c94}', ['\u{10cd4}', '\u{0}', '\u{0}']),
+ ('\u{10c95}', ['\u{10cd5}', '\u{0}', '\u{0}']),
+ ('\u{10c96}', ['\u{10cd6}', '\u{0}', '\u{0}']),
+ ('\u{10c97}', ['\u{10cd7}', '\u{0}', '\u{0}']),
+ ('\u{10c98}', ['\u{10cd8}', '\u{0}', '\u{0}']),
+ ('\u{10c99}', ['\u{10cd9}', '\u{0}', '\u{0}']),
+ ('\u{10c9a}', ['\u{10cda}', '\u{0}', '\u{0}']),
+ ('\u{10c9b}', ['\u{10cdb}', '\u{0}', '\u{0}']),
+ ('\u{10c9c}', ['\u{10cdc}', '\u{0}', '\u{0}']),
+ ('\u{10c9d}', ['\u{10cdd}', '\u{0}', '\u{0}']),
+ ('\u{10c9e}', ['\u{10cde}', '\u{0}', '\u{0}']),
+ ('\u{10c9f}', ['\u{10cdf}', '\u{0}', '\u{0}']),
+ ('\u{10ca0}', ['\u{10ce0}', '\u{0}', '\u{0}']),
+ ('\u{10ca1}', ['\u{10ce1}', '\u{0}', '\u{0}']),
+ ('\u{10ca2}', ['\u{10ce2}', '\u{0}', '\u{0}']),
+ ('\u{10ca3}', ['\u{10ce3}', '\u{0}', '\u{0}']),
+ ('\u{10ca4}', ['\u{10ce4}', '\u{0}', '\u{0}']),
+ ('\u{10ca5}', ['\u{10ce5}', '\u{0}', '\u{0}']),
+ ('\u{10ca6}', ['\u{10ce6}', '\u{0}', '\u{0}']),
+ ('\u{10ca7}', ['\u{10ce7}', '\u{0}', '\u{0}']),
+ ('\u{10ca8}', ['\u{10ce8}', '\u{0}', '\u{0}']),
+ ('\u{10ca9}', ['\u{10ce9}', '\u{0}', '\u{0}']),
+ ('\u{10caa}', ['\u{10cea}', '\u{0}', '\u{0}']),
+ ('\u{10cab}', ['\u{10ceb}', '\u{0}', '\u{0}']),
+ ('\u{10cac}', ['\u{10cec}', '\u{0}', '\u{0}']),
+ ('\u{10cad}', ['\u{10ced}', '\u{0}', '\u{0}']),
+ ('\u{10cae}', ['\u{10cee}', '\u{0}', '\u{0}']),
+ ('\u{10caf}', ['\u{10cef}', '\u{0}', '\u{0}']),
+ ('\u{10cb0}', ['\u{10cf0}', '\u{0}', '\u{0}']),
+ ('\u{10cb1}', ['\u{10cf1}', '\u{0}', '\u{0}']),
+ ('\u{10cb2}', ['\u{10cf2}', '\u{0}', '\u{0}']),
+ ('\u{118a0}', ['\u{118c0}', '\u{0}', '\u{0}']),
+ ('\u{118a1}', ['\u{118c1}', '\u{0}', '\u{0}']),
+ ('\u{118a2}', ['\u{118c2}', '\u{0}', '\u{0}']),
+ ('\u{118a3}', ['\u{118c3}', '\u{0}', '\u{0}']),
+ ('\u{118a4}', ['\u{118c4}', '\u{0}', '\u{0}']),
+ ('\u{118a5}', ['\u{118c5}', '\u{0}', '\u{0}']),
+ ('\u{118a6}', ['\u{118c6}', '\u{0}', '\u{0}']),
+ ('\u{118a7}', ['\u{118c7}', '\u{0}', '\u{0}']),
+ ('\u{118a8}', ['\u{118c8}', '\u{0}', '\u{0}']),
+ ('\u{118a9}', ['\u{118c9}', '\u{0}', '\u{0}']),
+ ('\u{118aa}', ['\u{118ca}', '\u{0}', '\u{0}']),
+ ('\u{118ab}', ['\u{118cb}', '\u{0}', '\u{0}']),
+ ('\u{118ac}', ['\u{118cc}', '\u{0}', '\u{0}']),
+ ('\u{118ad}', ['\u{118cd}', '\u{0}', '\u{0}']),
+ ('\u{118ae}', ['\u{118ce}', '\u{0}', '\u{0}']),
+ ('\u{118af}', ['\u{118cf}', '\u{0}', '\u{0}']),
+ ('\u{118b0}', ['\u{118d0}', '\u{0}', '\u{0}']),
+ ('\u{118b1}', ['\u{118d1}', '\u{0}', '\u{0}']),
+ ('\u{118b2}', ['\u{118d2}', '\u{0}', '\u{0}']),
+ ('\u{118b3}', ['\u{118d3}', '\u{0}', '\u{0}']),
+ ('\u{118b4}', ['\u{118d4}', '\u{0}', '\u{0}']),
+ ('\u{118b5}', ['\u{118d5}', '\u{0}', '\u{0}']),
+ ('\u{118b6}', ['\u{118d6}', '\u{0}', '\u{0}']),
+ ('\u{118b7}', ['\u{118d7}', '\u{0}', '\u{0}']),
+ ('\u{118b8}', ['\u{118d8}', '\u{0}', '\u{0}']),
+ ('\u{118b9}', ['\u{118d9}', '\u{0}', '\u{0}']),
+ ('\u{118ba}', ['\u{118da}', '\u{0}', '\u{0}']),
+ ('\u{118bb}', ['\u{118db}', '\u{0}', '\u{0}']),
+ ('\u{118bc}', ['\u{118dc}', '\u{0}', '\u{0}']),
+ ('\u{118bd}', ['\u{118dd}', '\u{0}', '\u{0}']),
+ ('\u{118be}', ['\u{118de}', '\u{0}', '\u{0}']),
+ ('\u{118bf}', ['\u{118df}', '\u{0}', '\u{0}']),
+ ('\u{16e40}', ['\u{16e60}', '\u{0}', '\u{0}']),
+ ('\u{16e41}', ['\u{16e61}', '\u{0}', '\u{0}']),
+ ('\u{16e42}', ['\u{16e62}', '\u{0}', '\u{0}']),
+ ('\u{16e43}', ['\u{16e63}', '\u{0}', '\u{0}']),
+ ('\u{16e44}', ['\u{16e64}', '\u{0}', '\u{0}']),
+ ('\u{16e45}', ['\u{16e65}', '\u{0}', '\u{0}']),
+ ('\u{16e46}', ['\u{16e66}', '\u{0}', '\u{0}']),
+ ('\u{16e47}', ['\u{16e67}', '\u{0}', '\u{0}']),
+ ('\u{16e48}', ['\u{16e68}', '\u{0}', '\u{0}']),
+ ('\u{16e49}', ['\u{16e69}', '\u{0}', '\u{0}']),
+ ('\u{16e4a}', ['\u{16e6a}', '\u{0}', '\u{0}']),
+ ('\u{16e4b}', ['\u{16e6b}', '\u{0}', '\u{0}']),
+ ('\u{16e4c}', ['\u{16e6c}', '\u{0}', '\u{0}']),
+ ('\u{16e4d}', ['\u{16e6d}', '\u{0}', '\u{0}']),
+ ('\u{16e4e}', ['\u{16e6e}', '\u{0}', '\u{0}']),
+ ('\u{16e4f}', ['\u{16e6f}', '\u{0}', '\u{0}']),
+ ('\u{16e50}', ['\u{16e70}', '\u{0}', '\u{0}']),
+ ('\u{16e51}', ['\u{16e71}', '\u{0}', '\u{0}']),
+ ('\u{16e52}', ['\u{16e72}', '\u{0}', '\u{0}']),
+ ('\u{16e53}', ['\u{16e73}', '\u{0}', '\u{0}']),
+ ('\u{16e54}', ['\u{16e74}', '\u{0}', '\u{0}']),
+ ('\u{16e55}', ['\u{16e75}', '\u{0}', '\u{0}']),
+ ('\u{16e56}', ['\u{16e76}', '\u{0}', '\u{0}']),
+ ('\u{16e57}', ['\u{16e77}', '\u{0}', '\u{0}']),
+ ('\u{16e58}', ['\u{16e78}', '\u{0}', '\u{0}']),
+ ('\u{16e59}', ['\u{16e79}', '\u{0}', '\u{0}']),
+ ('\u{16e5a}', ['\u{16e7a}', '\u{0}', '\u{0}']),
+ ('\u{16e5b}', ['\u{16e7b}', '\u{0}', '\u{0}']),
+ ('\u{16e5c}', ['\u{16e7c}', '\u{0}', '\u{0}']),
+ ('\u{16e5d}', ['\u{16e7d}', '\u{0}', '\u{0}']),
+ ('\u{16e5e}', ['\u{16e7e}', '\u{0}', '\u{0}']),
+ ('\u{16e5f}', ['\u{16e7f}', '\u{0}', '\u{0}']),
+ ('\u{1e900}', ['\u{1e922}', '\u{0}', '\u{0}']),
+ ('\u{1e901}', ['\u{1e923}', '\u{0}', '\u{0}']),
+ ('\u{1e902}', ['\u{1e924}', '\u{0}', '\u{0}']),
+ ('\u{1e903}', ['\u{1e925}', '\u{0}', '\u{0}']),
+ ('\u{1e904}', ['\u{1e926}', '\u{0}', '\u{0}']),
+ ('\u{1e905}', ['\u{1e927}', '\u{0}', '\u{0}']),
+ ('\u{1e906}', ['\u{1e928}', '\u{0}', '\u{0}']),
+ ('\u{1e907}', ['\u{1e929}', '\u{0}', '\u{0}']),
+ ('\u{1e908}', ['\u{1e92a}', '\u{0}', '\u{0}']),
+ ('\u{1e909}', ['\u{1e92b}', '\u{0}', '\u{0}']),
+ ('\u{1e90a}', ['\u{1e92c}', '\u{0}', '\u{0}']),
+ ('\u{1e90b}', ['\u{1e92d}', '\u{0}', '\u{0}']),
+ ('\u{1e90c}', ['\u{1e92e}', '\u{0}', '\u{0}']),
+ ('\u{1e90d}', ['\u{1e92f}', '\u{0}', '\u{0}']),
+ ('\u{1e90e}', ['\u{1e930}', '\u{0}', '\u{0}']),
+ ('\u{1e90f}', ['\u{1e931}', '\u{0}', '\u{0}']),
+ ('\u{1e910}', ['\u{1e932}', '\u{0}', '\u{0}']),
+ ('\u{1e911}', ['\u{1e933}', '\u{0}', '\u{0}']),
+ ('\u{1e912}', ['\u{1e934}', '\u{0}', '\u{0}']),
+ ('\u{1e913}', ['\u{1e935}', '\u{0}', '\u{0}']),
+ ('\u{1e914}', ['\u{1e936}', '\u{0}', '\u{0}']),
+ ('\u{1e915}', ['\u{1e937}', '\u{0}', '\u{0}']),
+ ('\u{1e916}', ['\u{1e938}', '\u{0}', '\u{0}']),
+ ('\u{1e917}', ['\u{1e939}', '\u{0}', '\u{0}']),
+ ('\u{1e918}', ['\u{1e93a}', '\u{0}', '\u{0}']),
+ ('\u{1e919}', ['\u{1e93b}', '\u{0}', '\u{0}']),
+ ('\u{1e91a}', ['\u{1e93c}', '\u{0}', '\u{0}']),
+ ('\u{1e91b}', ['\u{1e93d}', '\u{0}', '\u{0}']),
+ ('\u{1e91c}', ['\u{1e93e}', '\u{0}', '\u{0}']),
+ ('\u{1e91d}', ['\u{1e93f}', '\u{0}', '\u{0}']),
+ ('\u{1e91e}', ['\u{1e940}', '\u{0}', '\u{0}']),
+ ('\u{1e91f}', ['\u{1e941}', '\u{0}', '\u{0}']),
+ ('\u{1e920}', ['\u{1e942}', '\u{0}', '\u{0}']),
+ ('\u{1e921}', ['\u{1e943}', '\u{0}', '\u{0}']),
+ ];
+
+ static UPPERCASE_TABLE: &[(char, [char; 3])] = &[
+ ('a', ['A', '\u{0}', '\u{0}']), ('b', ['B', '\u{0}', '\u{0}']),
+ ('c', ['C', '\u{0}', '\u{0}']), ('d', ['D', '\u{0}', '\u{0}']),
+ ('e', ['E', '\u{0}', '\u{0}']), ('f', ['F', '\u{0}', '\u{0}']),
+ ('g', ['G', '\u{0}', '\u{0}']), ('h', ['H', '\u{0}', '\u{0}']),
+ ('i', ['I', '\u{0}', '\u{0}']), ('j', ['J', '\u{0}', '\u{0}']),
+ ('k', ['K', '\u{0}', '\u{0}']), ('l', ['L', '\u{0}', '\u{0}']),
+ ('m', ['M', '\u{0}', '\u{0}']), ('n', ['N', '\u{0}', '\u{0}']),
+ ('o', ['O', '\u{0}', '\u{0}']), ('p', ['P', '\u{0}', '\u{0}']),
+ ('q', ['Q', '\u{0}', '\u{0}']), ('r', ['R', '\u{0}', '\u{0}']),
+ ('s', ['S', '\u{0}', '\u{0}']), ('t', ['T', '\u{0}', '\u{0}']),
+ ('u', ['U', '\u{0}', '\u{0}']), ('v', ['V', '\u{0}', '\u{0}']),
+ ('w', ['W', '\u{0}', '\u{0}']), ('x', ['X', '\u{0}', '\u{0}']),
+ ('y', ['Y', '\u{0}', '\u{0}']), ('z', ['Z', '\u{0}', '\u{0}']),
+ ('\u{b5}', ['\u{39c}', '\u{0}', '\u{0}']), ('\u{df}', ['S', 'S', '\u{0}']),
+ ('\u{e0}', ['\u{c0}', '\u{0}', '\u{0}']), ('\u{e1}', ['\u{c1}', '\u{0}', '\u{0}']),
+ ('\u{e2}', ['\u{c2}', '\u{0}', '\u{0}']), ('\u{e3}', ['\u{c3}', '\u{0}', '\u{0}']),
+ ('\u{e4}', ['\u{c4}', '\u{0}', '\u{0}']), ('\u{e5}', ['\u{c5}', '\u{0}', '\u{0}']),
+ ('\u{e6}', ['\u{c6}', '\u{0}', '\u{0}']), ('\u{e7}', ['\u{c7}', '\u{0}', '\u{0}']),
+ ('\u{e8}', ['\u{c8}', '\u{0}', '\u{0}']), ('\u{e9}', ['\u{c9}', '\u{0}', '\u{0}']),
+ ('\u{ea}', ['\u{ca}', '\u{0}', '\u{0}']), ('\u{eb}', ['\u{cb}', '\u{0}', '\u{0}']),
+ ('\u{ec}', ['\u{cc}', '\u{0}', '\u{0}']), ('\u{ed}', ['\u{cd}', '\u{0}', '\u{0}']),
+ ('\u{ee}', ['\u{ce}', '\u{0}', '\u{0}']), ('\u{ef}', ['\u{cf}', '\u{0}', '\u{0}']),
+ ('\u{f0}', ['\u{d0}', '\u{0}', '\u{0}']), ('\u{f1}', ['\u{d1}', '\u{0}', '\u{0}']),
+ ('\u{f2}', ['\u{d2}', '\u{0}', '\u{0}']), ('\u{f3}', ['\u{d3}', '\u{0}', '\u{0}']),
+ ('\u{f4}', ['\u{d4}', '\u{0}', '\u{0}']), ('\u{f5}', ['\u{d5}', '\u{0}', '\u{0}']),
+ ('\u{f6}', ['\u{d6}', '\u{0}', '\u{0}']), ('\u{f8}', ['\u{d8}', '\u{0}', '\u{0}']),
+ ('\u{f9}', ['\u{d9}', '\u{0}', '\u{0}']), ('\u{fa}', ['\u{da}', '\u{0}', '\u{0}']),
+ ('\u{fb}', ['\u{db}', '\u{0}', '\u{0}']), ('\u{fc}', ['\u{dc}', '\u{0}', '\u{0}']),
+ ('\u{fd}', ['\u{dd}', '\u{0}', '\u{0}']), ('\u{fe}', ['\u{de}', '\u{0}', '\u{0}']),
+ ('\u{ff}', ['\u{178}', '\u{0}', '\u{0}']), ('\u{101}', ['\u{100}', '\u{0}', '\u{0}']),
+ ('\u{103}', ['\u{102}', '\u{0}', '\u{0}']), ('\u{105}', ['\u{104}', '\u{0}', '\u{0}']),
+ ('\u{107}', ['\u{106}', '\u{0}', '\u{0}']), ('\u{109}', ['\u{108}', '\u{0}', '\u{0}']),
+ ('\u{10b}', ['\u{10a}', '\u{0}', '\u{0}']), ('\u{10d}', ['\u{10c}', '\u{0}', '\u{0}']),
+ ('\u{10f}', ['\u{10e}', '\u{0}', '\u{0}']), ('\u{111}', ['\u{110}', '\u{0}', '\u{0}']),
+ ('\u{113}', ['\u{112}', '\u{0}', '\u{0}']), ('\u{115}', ['\u{114}', '\u{0}', '\u{0}']),
+ ('\u{117}', ['\u{116}', '\u{0}', '\u{0}']), ('\u{119}', ['\u{118}', '\u{0}', '\u{0}']),
+ ('\u{11b}', ['\u{11a}', '\u{0}', '\u{0}']), ('\u{11d}', ['\u{11c}', '\u{0}', '\u{0}']),
+ ('\u{11f}', ['\u{11e}', '\u{0}', '\u{0}']), ('\u{121}', ['\u{120}', '\u{0}', '\u{0}']),
+ ('\u{123}', ['\u{122}', '\u{0}', '\u{0}']), ('\u{125}', ['\u{124}', '\u{0}', '\u{0}']),
+ ('\u{127}', ['\u{126}', '\u{0}', '\u{0}']), ('\u{129}', ['\u{128}', '\u{0}', '\u{0}']),
+ ('\u{12b}', ['\u{12a}', '\u{0}', '\u{0}']), ('\u{12d}', ['\u{12c}', '\u{0}', '\u{0}']),
+ ('\u{12f}', ['\u{12e}', '\u{0}', '\u{0}']), ('\u{131}', ['I', '\u{0}', '\u{0}']),
+ ('\u{133}', ['\u{132}', '\u{0}', '\u{0}']), ('\u{135}', ['\u{134}', '\u{0}', '\u{0}']),
+ ('\u{137}', ['\u{136}', '\u{0}', '\u{0}']), ('\u{13a}', ['\u{139}', '\u{0}', '\u{0}']),
+ ('\u{13c}', ['\u{13b}', '\u{0}', '\u{0}']), ('\u{13e}', ['\u{13d}', '\u{0}', '\u{0}']),
+ ('\u{140}', ['\u{13f}', '\u{0}', '\u{0}']), ('\u{142}', ['\u{141}', '\u{0}', '\u{0}']),
+ ('\u{144}', ['\u{143}', '\u{0}', '\u{0}']), ('\u{146}', ['\u{145}', '\u{0}', '\u{0}']),
+ ('\u{148}', ['\u{147}', '\u{0}', '\u{0}']), ('\u{149}', ['\u{2bc}', 'N', '\u{0}']),
+ ('\u{14b}', ['\u{14a}', '\u{0}', '\u{0}']), ('\u{14d}', ['\u{14c}', '\u{0}', '\u{0}']),
+ ('\u{14f}', ['\u{14e}', '\u{0}', '\u{0}']), ('\u{151}', ['\u{150}', '\u{0}', '\u{0}']),
+ ('\u{153}', ['\u{152}', '\u{0}', '\u{0}']), ('\u{155}', ['\u{154}', '\u{0}', '\u{0}']),
+ ('\u{157}', ['\u{156}', '\u{0}', '\u{0}']), ('\u{159}', ['\u{158}', '\u{0}', '\u{0}']),
+ ('\u{15b}', ['\u{15a}', '\u{0}', '\u{0}']), ('\u{15d}', ['\u{15c}', '\u{0}', '\u{0}']),
+ ('\u{15f}', ['\u{15e}', '\u{0}', '\u{0}']), ('\u{161}', ['\u{160}', '\u{0}', '\u{0}']),
+ ('\u{163}', ['\u{162}', '\u{0}', '\u{0}']), ('\u{165}', ['\u{164}', '\u{0}', '\u{0}']),
+ ('\u{167}', ['\u{166}', '\u{0}', '\u{0}']), ('\u{169}', ['\u{168}', '\u{0}', '\u{0}']),
+ ('\u{16b}', ['\u{16a}', '\u{0}', '\u{0}']), ('\u{16d}', ['\u{16c}', '\u{0}', '\u{0}']),
+ ('\u{16f}', ['\u{16e}', '\u{0}', '\u{0}']), ('\u{171}', ['\u{170}', '\u{0}', '\u{0}']),
+ ('\u{173}', ['\u{172}', '\u{0}', '\u{0}']), ('\u{175}', ['\u{174}', '\u{0}', '\u{0}']),
+ ('\u{177}', ['\u{176}', '\u{0}', '\u{0}']), ('\u{17a}', ['\u{179}', '\u{0}', '\u{0}']),
+ ('\u{17c}', ['\u{17b}', '\u{0}', '\u{0}']), ('\u{17e}', ['\u{17d}', '\u{0}', '\u{0}']),
+ ('\u{17f}', ['S', '\u{0}', '\u{0}']), ('\u{180}', ['\u{243}', '\u{0}', '\u{0}']),
+ ('\u{183}', ['\u{182}', '\u{0}', '\u{0}']), ('\u{185}', ['\u{184}', '\u{0}', '\u{0}']),
+ ('\u{188}', ['\u{187}', '\u{0}', '\u{0}']), ('\u{18c}', ['\u{18b}', '\u{0}', '\u{0}']),
+ ('\u{192}', ['\u{191}', '\u{0}', '\u{0}']), ('\u{195}', ['\u{1f6}', '\u{0}', '\u{0}']),
+ ('\u{199}', ['\u{198}', '\u{0}', '\u{0}']), ('\u{19a}', ['\u{23d}', '\u{0}', '\u{0}']),
+ ('\u{19e}', ['\u{220}', '\u{0}', '\u{0}']), ('\u{1a1}', ['\u{1a0}', '\u{0}', '\u{0}']),
+ ('\u{1a3}', ['\u{1a2}', '\u{0}', '\u{0}']), ('\u{1a5}', ['\u{1a4}', '\u{0}', '\u{0}']),
+ ('\u{1a8}', ['\u{1a7}', '\u{0}', '\u{0}']), ('\u{1ad}', ['\u{1ac}', '\u{0}', '\u{0}']),
+ ('\u{1b0}', ['\u{1af}', '\u{0}', '\u{0}']), ('\u{1b4}', ['\u{1b3}', '\u{0}', '\u{0}']),
+ ('\u{1b6}', ['\u{1b5}', '\u{0}', '\u{0}']), ('\u{1b9}', ['\u{1b8}', '\u{0}', '\u{0}']),
+ ('\u{1bd}', ['\u{1bc}', '\u{0}', '\u{0}']), ('\u{1bf}', ['\u{1f7}', '\u{0}', '\u{0}']),
+ ('\u{1c5}', ['\u{1c4}', '\u{0}', '\u{0}']), ('\u{1c6}', ['\u{1c4}', '\u{0}', '\u{0}']),
+ ('\u{1c8}', ['\u{1c7}', '\u{0}', '\u{0}']), ('\u{1c9}', ['\u{1c7}', '\u{0}', '\u{0}']),
+ ('\u{1cb}', ['\u{1ca}', '\u{0}', '\u{0}']), ('\u{1cc}', ['\u{1ca}', '\u{0}', '\u{0}']),
+ ('\u{1ce}', ['\u{1cd}', '\u{0}', '\u{0}']), ('\u{1d0}', ['\u{1cf}', '\u{0}', '\u{0}']),
+ ('\u{1d2}', ['\u{1d1}', '\u{0}', '\u{0}']), ('\u{1d4}', ['\u{1d3}', '\u{0}', '\u{0}']),
+ ('\u{1d6}', ['\u{1d5}', '\u{0}', '\u{0}']), ('\u{1d8}', ['\u{1d7}', '\u{0}', '\u{0}']),
+ ('\u{1da}', ['\u{1d9}', '\u{0}', '\u{0}']), ('\u{1dc}', ['\u{1db}', '\u{0}', '\u{0}']),
+ ('\u{1dd}', ['\u{18e}', '\u{0}', '\u{0}']), ('\u{1df}', ['\u{1de}', '\u{0}', '\u{0}']),
+ ('\u{1e1}', ['\u{1e0}', '\u{0}', '\u{0}']), ('\u{1e3}', ['\u{1e2}', '\u{0}', '\u{0}']),
+ ('\u{1e5}', ['\u{1e4}', '\u{0}', '\u{0}']), ('\u{1e7}', ['\u{1e6}', '\u{0}', '\u{0}']),
+ ('\u{1e9}', ['\u{1e8}', '\u{0}', '\u{0}']), ('\u{1eb}', ['\u{1ea}', '\u{0}', '\u{0}']),
+ ('\u{1ed}', ['\u{1ec}', '\u{0}', '\u{0}']), ('\u{1ef}', ['\u{1ee}', '\u{0}', '\u{0}']),
+ ('\u{1f0}', ['J', '\u{30c}', '\u{0}']), ('\u{1f2}', ['\u{1f1}', '\u{0}', '\u{0}']),
+ ('\u{1f3}', ['\u{1f1}', '\u{0}', '\u{0}']), ('\u{1f5}', ['\u{1f4}', '\u{0}', '\u{0}']),
+ ('\u{1f9}', ['\u{1f8}', '\u{0}', '\u{0}']), ('\u{1fb}', ['\u{1fa}', '\u{0}', '\u{0}']),
+ ('\u{1fd}', ['\u{1fc}', '\u{0}', '\u{0}']), ('\u{1ff}', ['\u{1fe}', '\u{0}', '\u{0}']),
+ ('\u{201}', ['\u{200}', '\u{0}', '\u{0}']), ('\u{203}', ['\u{202}', '\u{0}', '\u{0}']),
+ ('\u{205}', ['\u{204}', '\u{0}', '\u{0}']), ('\u{207}', ['\u{206}', '\u{0}', '\u{0}']),
+ ('\u{209}', ['\u{208}', '\u{0}', '\u{0}']), ('\u{20b}', ['\u{20a}', '\u{0}', '\u{0}']),
+ ('\u{20d}', ['\u{20c}', '\u{0}', '\u{0}']), ('\u{20f}', ['\u{20e}', '\u{0}', '\u{0}']),
+ ('\u{211}', ['\u{210}', '\u{0}', '\u{0}']), ('\u{213}', ['\u{212}', '\u{0}', '\u{0}']),
+ ('\u{215}', ['\u{214}', '\u{0}', '\u{0}']), ('\u{217}', ['\u{216}', '\u{0}', '\u{0}']),
+ ('\u{219}', ['\u{218}', '\u{0}', '\u{0}']), ('\u{21b}', ['\u{21a}', '\u{0}', '\u{0}']),
+ ('\u{21d}', ['\u{21c}', '\u{0}', '\u{0}']), ('\u{21f}', ['\u{21e}', '\u{0}', '\u{0}']),
+ ('\u{223}', ['\u{222}', '\u{0}', '\u{0}']), ('\u{225}', ['\u{224}', '\u{0}', '\u{0}']),
+ ('\u{227}', ['\u{226}', '\u{0}', '\u{0}']), ('\u{229}', ['\u{228}', '\u{0}', '\u{0}']),
+ ('\u{22b}', ['\u{22a}', '\u{0}', '\u{0}']), ('\u{22d}', ['\u{22c}', '\u{0}', '\u{0}']),
+ ('\u{22f}', ['\u{22e}', '\u{0}', '\u{0}']), ('\u{231}', ['\u{230}', '\u{0}', '\u{0}']),
+ ('\u{233}', ['\u{232}', '\u{0}', '\u{0}']), ('\u{23c}', ['\u{23b}', '\u{0}', '\u{0}']),
+ ('\u{23f}', ['\u{2c7e}', '\u{0}', '\u{0}']), ('\u{240}', ['\u{2c7f}', '\u{0}', '\u{0}']),
+ ('\u{242}', ['\u{241}', '\u{0}', '\u{0}']), ('\u{247}', ['\u{246}', '\u{0}', '\u{0}']),
+ ('\u{249}', ['\u{248}', '\u{0}', '\u{0}']), ('\u{24b}', ['\u{24a}', '\u{0}', '\u{0}']),
+ ('\u{24d}', ['\u{24c}', '\u{0}', '\u{0}']), ('\u{24f}', ['\u{24e}', '\u{0}', '\u{0}']),
+ ('\u{250}', ['\u{2c6f}', '\u{0}', '\u{0}']), ('\u{251}', ['\u{2c6d}', '\u{0}', '\u{0}']),
+ ('\u{252}', ['\u{2c70}', '\u{0}', '\u{0}']), ('\u{253}', ['\u{181}', '\u{0}', '\u{0}']),
+ ('\u{254}', ['\u{186}', '\u{0}', '\u{0}']), ('\u{256}', ['\u{189}', '\u{0}', '\u{0}']),
+ ('\u{257}', ['\u{18a}', '\u{0}', '\u{0}']), ('\u{259}', ['\u{18f}', '\u{0}', '\u{0}']),
+ ('\u{25b}', ['\u{190}', '\u{0}', '\u{0}']), ('\u{25c}', ['\u{a7ab}', '\u{0}', '\u{0}']),
+ ('\u{260}', ['\u{193}', '\u{0}', '\u{0}']), ('\u{261}', ['\u{a7ac}', '\u{0}', '\u{0}']),
+ ('\u{263}', ['\u{194}', '\u{0}', '\u{0}']), ('\u{265}', ['\u{a78d}', '\u{0}', '\u{0}']),
+ ('\u{266}', ['\u{a7aa}', '\u{0}', '\u{0}']), ('\u{268}', ['\u{197}', '\u{0}', '\u{0}']),
+ ('\u{269}', ['\u{196}', '\u{0}', '\u{0}']), ('\u{26a}', ['\u{a7ae}', '\u{0}', '\u{0}']),
+ ('\u{26b}', ['\u{2c62}', '\u{0}', '\u{0}']), ('\u{26c}', ['\u{a7ad}', '\u{0}', '\u{0}']),
+ ('\u{26f}', ['\u{19c}', '\u{0}', '\u{0}']), ('\u{271}', ['\u{2c6e}', '\u{0}', '\u{0}']),
+ ('\u{272}', ['\u{19d}', '\u{0}', '\u{0}']), ('\u{275}', ['\u{19f}', '\u{0}', '\u{0}']),
+ ('\u{27d}', ['\u{2c64}', '\u{0}', '\u{0}']), ('\u{280}', ['\u{1a6}', '\u{0}', '\u{0}']),
+ ('\u{282}', ['\u{a7c5}', '\u{0}', '\u{0}']), ('\u{283}', ['\u{1a9}', '\u{0}', '\u{0}']),
+ ('\u{287}', ['\u{a7b1}', '\u{0}', '\u{0}']), ('\u{288}', ['\u{1ae}', '\u{0}', '\u{0}']),
+ ('\u{289}', ['\u{244}', '\u{0}', '\u{0}']), ('\u{28a}', ['\u{1b1}', '\u{0}', '\u{0}']),
+ ('\u{28b}', ['\u{1b2}', '\u{0}', '\u{0}']), ('\u{28c}', ['\u{245}', '\u{0}', '\u{0}']),
+ ('\u{292}', ['\u{1b7}', '\u{0}', '\u{0}']), ('\u{29d}', ['\u{a7b2}', '\u{0}', '\u{0}']),
+ ('\u{29e}', ['\u{a7b0}', '\u{0}', '\u{0}']), ('\u{345}', ['\u{399}', '\u{0}', '\u{0}']),
+ ('\u{371}', ['\u{370}', '\u{0}', '\u{0}']), ('\u{373}', ['\u{372}', '\u{0}', '\u{0}']),
+ ('\u{377}', ['\u{376}', '\u{0}', '\u{0}']), ('\u{37b}', ['\u{3fd}', '\u{0}', '\u{0}']),
+ ('\u{37c}', ['\u{3fe}', '\u{0}', '\u{0}']), ('\u{37d}', ['\u{3ff}', '\u{0}', '\u{0}']),
+ ('\u{390}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\u{0}', '\u{0}']),
+ ('\u{3ad}', ['\u{388}', '\u{0}', '\u{0}']), ('\u{3ae}', ['\u{389}', '\u{0}', '\u{0}']),
+ ('\u{3af}', ['\u{38a}', '\u{0}', '\u{0}']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']),
+ ('\u{3b1}', ['\u{391}', '\u{0}', '\u{0}']), ('\u{3b2}', ['\u{392}', '\u{0}', '\u{0}']),
+ ('\u{3b3}', ['\u{393}', '\u{0}', '\u{0}']), ('\u{3b4}', ['\u{394}', '\u{0}', '\u{0}']),
+ ('\u{3b5}', ['\u{395}', '\u{0}', '\u{0}']), ('\u{3b6}', ['\u{396}', '\u{0}', '\u{0}']),
+ ('\u{3b7}', ['\u{397}', '\u{0}', '\u{0}']), ('\u{3b8}', ['\u{398}', '\u{0}', '\u{0}']),
+ ('\u{3b9}', ['\u{399}', '\u{0}', '\u{0}']), ('\u{3ba}', ['\u{39a}', '\u{0}', '\u{0}']),
+ ('\u{3bb}', ['\u{39b}', '\u{0}', '\u{0}']), ('\u{3bc}', ['\u{39c}', '\u{0}', '\u{0}']),
+ ('\u{3bd}', ['\u{39d}', '\u{0}', '\u{0}']), ('\u{3be}', ['\u{39e}', '\u{0}', '\u{0}']),
+ ('\u{3bf}', ['\u{39f}', '\u{0}', '\u{0}']), ('\u{3c0}', ['\u{3a0}', '\u{0}', '\u{0}']),
+ ('\u{3c1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3c2}', ['\u{3a3}', '\u{0}', '\u{0}']),
+ ('\u{3c3}', ['\u{3a3}', '\u{0}', '\u{0}']), ('\u{3c4}', ['\u{3a4}', '\u{0}', '\u{0}']),
+ ('\u{3c5}', ['\u{3a5}', '\u{0}', '\u{0}']), ('\u{3c6}', ['\u{3a6}', '\u{0}', '\u{0}']),
+ ('\u{3c7}', ['\u{3a7}', '\u{0}', '\u{0}']), ('\u{3c8}', ['\u{3a8}', '\u{0}', '\u{0}']),
+ ('\u{3c9}', ['\u{3a9}', '\u{0}', '\u{0}']), ('\u{3ca}', ['\u{3aa}', '\u{0}', '\u{0}']),
+ ('\u{3cb}', ['\u{3ab}', '\u{0}', '\u{0}']), ('\u{3cc}', ['\u{38c}', '\u{0}', '\u{0}']),
+ ('\u{3cd}', ['\u{38e}', '\u{0}', '\u{0}']), ('\u{3ce}', ['\u{38f}', '\u{0}', '\u{0}']),
+ ('\u{3d0}', ['\u{392}', '\u{0}', '\u{0}']), ('\u{3d1}', ['\u{398}', '\u{0}', '\u{0}']),
+ ('\u{3d5}', ['\u{3a6}', '\u{0}', '\u{0}']), ('\u{3d6}', ['\u{3a0}', '\u{0}', '\u{0}']),
+ ('\u{3d7}', ['\u{3cf}', '\u{0}', '\u{0}']), ('\u{3d9}', ['\u{3d8}', '\u{0}', '\u{0}']),
+ ('\u{3db}', ['\u{3da}', '\u{0}', '\u{0}']), ('\u{3dd}', ['\u{3dc}', '\u{0}', '\u{0}']),
+ ('\u{3df}', ['\u{3de}', '\u{0}', '\u{0}']), ('\u{3e1}', ['\u{3e0}', '\u{0}', '\u{0}']),
+ ('\u{3e3}', ['\u{3e2}', '\u{0}', '\u{0}']), ('\u{3e5}', ['\u{3e4}', '\u{0}', '\u{0}']),
+ ('\u{3e7}', ['\u{3e6}', '\u{0}', '\u{0}']), ('\u{3e9}', ['\u{3e8}', '\u{0}', '\u{0}']),
+ ('\u{3eb}', ['\u{3ea}', '\u{0}', '\u{0}']), ('\u{3ed}', ['\u{3ec}', '\u{0}', '\u{0}']),
+ ('\u{3ef}', ['\u{3ee}', '\u{0}', '\u{0}']), ('\u{3f0}', ['\u{39a}', '\u{0}', '\u{0}']),
+ ('\u{3f1}', ['\u{3a1}', '\u{0}', '\u{0}']), ('\u{3f2}', ['\u{3f9}', '\u{0}', '\u{0}']),
+ ('\u{3f3}', ['\u{37f}', '\u{0}', '\u{0}']), ('\u{3f5}', ['\u{395}', '\u{0}', '\u{0}']),
+ ('\u{3f8}', ['\u{3f7}', '\u{0}', '\u{0}']), ('\u{3fb}', ['\u{3fa}', '\u{0}', '\u{0}']),
+ ('\u{430}', ['\u{410}', '\u{0}', '\u{0}']), ('\u{431}', ['\u{411}', '\u{0}', '\u{0}']),
+ ('\u{432}', ['\u{412}', '\u{0}', '\u{0}']), ('\u{433}', ['\u{413}', '\u{0}', '\u{0}']),
+ ('\u{434}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{435}', ['\u{415}', '\u{0}', '\u{0}']),
+ ('\u{436}', ['\u{416}', '\u{0}', '\u{0}']), ('\u{437}', ['\u{417}', '\u{0}', '\u{0}']),
+ ('\u{438}', ['\u{418}', '\u{0}', '\u{0}']), ('\u{439}', ['\u{419}', '\u{0}', '\u{0}']),
+ ('\u{43a}', ['\u{41a}', '\u{0}', '\u{0}']), ('\u{43b}', ['\u{41b}', '\u{0}', '\u{0}']),
+ ('\u{43c}', ['\u{41c}', '\u{0}', '\u{0}']), ('\u{43d}', ['\u{41d}', '\u{0}', '\u{0}']),
+ ('\u{43e}', ['\u{41e}', '\u{0}', '\u{0}']), ('\u{43f}', ['\u{41f}', '\u{0}', '\u{0}']),
+ ('\u{440}', ['\u{420}', '\u{0}', '\u{0}']), ('\u{441}', ['\u{421}', '\u{0}', '\u{0}']),
+ ('\u{442}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{443}', ['\u{423}', '\u{0}', '\u{0}']),
+ ('\u{444}', ['\u{424}', '\u{0}', '\u{0}']), ('\u{445}', ['\u{425}', '\u{0}', '\u{0}']),
+ ('\u{446}', ['\u{426}', '\u{0}', '\u{0}']), ('\u{447}', ['\u{427}', '\u{0}', '\u{0}']),
+ ('\u{448}', ['\u{428}', '\u{0}', '\u{0}']), ('\u{449}', ['\u{429}', '\u{0}', '\u{0}']),
+ ('\u{44a}', ['\u{42a}', '\u{0}', '\u{0}']), ('\u{44b}', ['\u{42b}', '\u{0}', '\u{0}']),
+ ('\u{44c}', ['\u{42c}', '\u{0}', '\u{0}']), ('\u{44d}', ['\u{42d}', '\u{0}', '\u{0}']),
+ ('\u{44e}', ['\u{42e}', '\u{0}', '\u{0}']), ('\u{44f}', ['\u{42f}', '\u{0}', '\u{0}']),
+ ('\u{450}', ['\u{400}', '\u{0}', '\u{0}']), ('\u{451}', ['\u{401}', '\u{0}', '\u{0}']),
+ ('\u{452}', ['\u{402}', '\u{0}', '\u{0}']), ('\u{453}', ['\u{403}', '\u{0}', '\u{0}']),
+ ('\u{454}', ['\u{404}', '\u{0}', '\u{0}']), ('\u{455}', ['\u{405}', '\u{0}', '\u{0}']),
+ ('\u{456}', ['\u{406}', '\u{0}', '\u{0}']), ('\u{457}', ['\u{407}', '\u{0}', '\u{0}']),
+ ('\u{458}', ['\u{408}', '\u{0}', '\u{0}']), ('\u{459}', ['\u{409}', '\u{0}', '\u{0}']),
+ ('\u{45a}', ['\u{40a}', '\u{0}', '\u{0}']), ('\u{45b}', ['\u{40b}', '\u{0}', '\u{0}']),
+ ('\u{45c}', ['\u{40c}', '\u{0}', '\u{0}']), ('\u{45d}', ['\u{40d}', '\u{0}', '\u{0}']),
+ ('\u{45e}', ['\u{40e}', '\u{0}', '\u{0}']), ('\u{45f}', ['\u{40f}', '\u{0}', '\u{0}']),
+ ('\u{461}', ['\u{460}', '\u{0}', '\u{0}']), ('\u{463}', ['\u{462}', '\u{0}', '\u{0}']),
+ ('\u{465}', ['\u{464}', '\u{0}', '\u{0}']), ('\u{467}', ['\u{466}', '\u{0}', '\u{0}']),
+ ('\u{469}', ['\u{468}', '\u{0}', '\u{0}']), ('\u{46b}', ['\u{46a}', '\u{0}', '\u{0}']),
+ ('\u{46d}', ['\u{46c}', '\u{0}', '\u{0}']), ('\u{46f}', ['\u{46e}', '\u{0}', '\u{0}']),
+ ('\u{471}', ['\u{470}', '\u{0}', '\u{0}']), ('\u{473}', ['\u{472}', '\u{0}', '\u{0}']),
+ ('\u{475}', ['\u{474}', '\u{0}', '\u{0}']), ('\u{477}', ['\u{476}', '\u{0}', '\u{0}']),
+ ('\u{479}', ['\u{478}', '\u{0}', '\u{0}']), ('\u{47b}', ['\u{47a}', '\u{0}', '\u{0}']),
+ ('\u{47d}', ['\u{47c}', '\u{0}', '\u{0}']), ('\u{47f}', ['\u{47e}', '\u{0}', '\u{0}']),
+ ('\u{481}', ['\u{480}', '\u{0}', '\u{0}']), ('\u{48b}', ['\u{48a}', '\u{0}', '\u{0}']),
+ ('\u{48d}', ['\u{48c}', '\u{0}', '\u{0}']), ('\u{48f}', ['\u{48e}', '\u{0}', '\u{0}']),
+ ('\u{491}', ['\u{490}', '\u{0}', '\u{0}']), ('\u{493}', ['\u{492}', '\u{0}', '\u{0}']),
+ ('\u{495}', ['\u{494}', '\u{0}', '\u{0}']), ('\u{497}', ['\u{496}', '\u{0}', '\u{0}']),
+ ('\u{499}', ['\u{498}', '\u{0}', '\u{0}']), ('\u{49b}', ['\u{49a}', '\u{0}', '\u{0}']),
+ ('\u{49d}', ['\u{49c}', '\u{0}', '\u{0}']), ('\u{49f}', ['\u{49e}', '\u{0}', '\u{0}']),
+ ('\u{4a1}', ['\u{4a0}', '\u{0}', '\u{0}']), ('\u{4a3}', ['\u{4a2}', '\u{0}', '\u{0}']),
+ ('\u{4a5}', ['\u{4a4}', '\u{0}', '\u{0}']), ('\u{4a7}', ['\u{4a6}', '\u{0}', '\u{0}']),
+ ('\u{4a9}', ['\u{4a8}', '\u{0}', '\u{0}']), ('\u{4ab}', ['\u{4aa}', '\u{0}', '\u{0}']),
+ ('\u{4ad}', ['\u{4ac}', '\u{0}', '\u{0}']), ('\u{4af}', ['\u{4ae}', '\u{0}', '\u{0}']),
+ ('\u{4b1}', ['\u{4b0}', '\u{0}', '\u{0}']), ('\u{4b3}', ['\u{4b2}', '\u{0}', '\u{0}']),
+ ('\u{4b5}', ['\u{4b4}', '\u{0}', '\u{0}']), ('\u{4b7}', ['\u{4b6}', '\u{0}', '\u{0}']),
+ ('\u{4b9}', ['\u{4b8}', '\u{0}', '\u{0}']), ('\u{4bb}', ['\u{4ba}', '\u{0}', '\u{0}']),
+ ('\u{4bd}', ['\u{4bc}', '\u{0}', '\u{0}']), ('\u{4bf}', ['\u{4be}', '\u{0}', '\u{0}']),
+ ('\u{4c2}', ['\u{4c1}', '\u{0}', '\u{0}']), ('\u{4c4}', ['\u{4c3}', '\u{0}', '\u{0}']),
+ ('\u{4c6}', ['\u{4c5}', '\u{0}', '\u{0}']), ('\u{4c8}', ['\u{4c7}', '\u{0}', '\u{0}']),
+ ('\u{4ca}', ['\u{4c9}', '\u{0}', '\u{0}']), ('\u{4cc}', ['\u{4cb}', '\u{0}', '\u{0}']),
+ ('\u{4ce}', ['\u{4cd}', '\u{0}', '\u{0}']), ('\u{4cf}', ['\u{4c0}', '\u{0}', '\u{0}']),
+ ('\u{4d1}', ['\u{4d0}', '\u{0}', '\u{0}']), ('\u{4d3}', ['\u{4d2}', '\u{0}', '\u{0}']),
+ ('\u{4d5}', ['\u{4d4}', '\u{0}', '\u{0}']), ('\u{4d7}', ['\u{4d6}', '\u{0}', '\u{0}']),
+ ('\u{4d9}', ['\u{4d8}', '\u{0}', '\u{0}']), ('\u{4db}', ['\u{4da}', '\u{0}', '\u{0}']),
+ ('\u{4dd}', ['\u{4dc}', '\u{0}', '\u{0}']), ('\u{4df}', ['\u{4de}', '\u{0}', '\u{0}']),
+ ('\u{4e1}', ['\u{4e0}', '\u{0}', '\u{0}']), ('\u{4e3}', ['\u{4e2}', '\u{0}', '\u{0}']),
+ ('\u{4e5}', ['\u{4e4}', '\u{0}', '\u{0}']), ('\u{4e7}', ['\u{4e6}', '\u{0}', '\u{0}']),
+ ('\u{4e9}', ['\u{4e8}', '\u{0}', '\u{0}']), ('\u{4eb}', ['\u{4ea}', '\u{0}', '\u{0}']),
+ ('\u{4ed}', ['\u{4ec}', '\u{0}', '\u{0}']), ('\u{4ef}', ['\u{4ee}', '\u{0}', '\u{0}']),
+ ('\u{4f1}', ['\u{4f0}', '\u{0}', '\u{0}']), ('\u{4f3}', ['\u{4f2}', '\u{0}', '\u{0}']),
+ ('\u{4f5}', ['\u{4f4}', '\u{0}', '\u{0}']), ('\u{4f7}', ['\u{4f6}', '\u{0}', '\u{0}']),
+ ('\u{4f9}', ['\u{4f8}', '\u{0}', '\u{0}']), ('\u{4fb}', ['\u{4fa}', '\u{0}', '\u{0}']),
+ ('\u{4fd}', ['\u{4fc}', '\u{0}', '\u{0}']), ('\u{4ff}', ['\u{4fe}', '\u{0}', '\u{0}']),
+ ('\u{501}', ['\u{500}', '\u{0}', '\u{0}']), ('\u{503}', ['\u{502}', '\u{0}', '\u{0}']),
+ ('\u{505}', ['\u{504}', '\u{0}', '\u{0}']), ('\u{507}', ['\u{506}', '\u{0}', '\u{0}']),
+ ('\u{509}', ['\u{508}', '\u{0}', '\u{0}']), ('\u{50b}', ['\u{50a}', '\u{0}', '\u{0}']),
+ ('\u{50d}', ['\u{50c}', '\u{0}', '\u{0}']), ('\u{50f}', ['\u{50e}', '\u{0}', '\u{0}']),
+ ('\u{511}', ['\u{510}', '\u{0}', '\u{0}']), ('\u{513}', ['\u{512}', '\u{0}', '\u{0}']),
+ ('\u{515}', ['\u{514}', '\u{0}', '\u{0}']), ('\u{517}', ['\u{516}', '\u{0}', '\u{0}']),
+ ('\u{519}', ['\u{518}', '\u{0}', '\u{0}']), ('\u{51b}', ['\u{51a}', '\u{0}', '\u{0}']),
+ ('\u{51d}', ['\u{51c}', '\u{0}', '\u{0}']), ('\u{51f}', ['\u{51e}', '\u{0}', '\u{0}']),
+ ('\u{521}', ['\u{520}', '\u{0}', '\u{0}']), ('\u{523}', ['\u{522}', '\u{0}', '\u{0}']),
+ ('\u{525}', ['\u{524}', '\u{0}', '\u{0}']), ('\u{527}', ['\u{526}', '\u{0}', '\u{0}']),
+ ('\u{529}', ['\u{528}', '\u{0}', '\u{0}']), ('\u{52b}', ['\u{52a}', '\u{0}', '\u{0}']),
+ ('\u{52d}', ['\u{52c}', '\u{0}', '\u{0}']), ('\u{52f}', ['\u{52e}', '\u{0}', '\u{0}']),
+ ('\u{561}', ['\u{531}', '\u{0}', '\u{0}']), ('\u{562}', ['\u{532}', '\u{0}', '\u{0}']),
+ ('\u{563}', ['\u{533}', '\u{0}', '\u{0}']), ('\u{564}', ['\u{534}', '\u{0}', '\u{0}']),
+ ('\u{565}', ['\u{535}', '\u{0}', '\u{0}']), ('\u{566}', ['\u{536}', '\u{0}', '\u{0}']),
+ ('\u{567}', ['\u{537}', '\u{0}', '\u{0}']), ('\u{568}', ['\u{538}', '\u{0}', '\u{0}']),
+ ('\u{569}', ['\u{539}', '\u{0}', '\u{0}']), ('\u{56a}', ['\u{53a}', '\u{0}', '\u{0}']),
+ ('\u{56b}', ['\u{53b}', '\u{0}', '\u{0}']), ('\u{56c}', ['\u{53c}', '\u{0}', '\u{0}']),
+ ('\u{56d}', ['\u{53d}', '\u{0}', '\u{0}']), ('\u{56e}', ['\u{53e}', '\u{0}', '\u{0}']),
+ ('\u{56f}', ['\u{53f}', '\u{0}', '\u{0}']), ('\u{570}', ['\u{540}', '\u{0}', '\u{0}']),
+ ('\u{571}', ['\u{541}', '\u{0}', '\u{0}']), ('\u{572}', ['\u{542}', '\u{0}', '\u{0}']),
+ ('\u{573}', ['\u{543}', '\u{0}', '\u{0}']), ('\u{574}', ['\u{544}', '\u{0}', '\u{0}']),
+ ('\u{575}', ['\u{545}', '\u{0}', '\u{0}']), ('\u{576}', ['\u{546}', '\u{0}', '\u{0}']),
+ ('\u{577}', ['\u{547}', '\u{0}', '\u{0}']), ('\u{578}', ['\u{548}', '\u{0}', '\u{0}']),
+ ('\u{579}', ['\u{549}', '\u{0}', '\u{0}']), ('\u{57a}', ['\u{54a}', '\u{0}', '\u{0}']),
+ ('\u{57b}', ['\u{54b}', '\u{0}', '\u{0}']), ('\u{57c}', ['\u{54c}', '\u{0}', '\u{0}']),
+ ('\u{57d}', ['\u{54d}', '\u{0}', '\u{0}']), ('\u{57e}', ['\u{54e}', '\u{0}', '\u{0}']),
+ ('\u{57f}', ['\u{54f}', '\u{0}', '\u{0}']), ('\u{580}', ['\u{550}', '\u{0}', '\u{0}']),
+ ('\u{581}', ['\u{551}', '\u{0}', '\u{0}']), ('\u{582}', ['\u{552}', '\u{0}', '\u{0}']),
+ ('\u{583}', ['\u{553}', '\u{0}', '\u{0}']), ('\u{584}', ['\u{554}', '\u{0}', '\u{0}']),
+ ('\u{585}', ['\u{555}', '\u{0}', '\u{0}']), ('\u{586}', ['\u{556}', '\u{0}', '\u{0}']),
+ ('\u{587}', ['\u{535}', '\u{552}', '\u{0}']), ('\u{10d0}', ['\u{1c90}', '\u{0}', '\u{0}']),
+ ('\u{10d1}', ['\u{1c91}', '\u{0}', '\u{0}']), ('\u{10d2}', ['\u{1c92}', '\u{0}', '\u{0}']),
+ ('\u{10d3}', ['\u{1c93}', '\u{0}', '\u{0}']), ('\u{10d4}', ['\u{1c94}', '\u{0}', '\u{0}']),
+ ('\u{10d5}', ['\u{1c95}', '\u{0}', '\u{0}']), ('\u{10d6}', ['\u{1c96}', '\u{0}', '\u{0}']),
+ ('\u{10d7}', ['\u{1c97}', '\u{0}', '\u{0}']), ('\u{10d8}', ['\u{1c98}', '\u{0}', '\u{0}']),
+ ('\u{10d9}', ['\u{1c99}', '\u{0}', '\u{0}']), ('\u{10da}', ['\u{1c9a}', '\u{0}', '\u{0}']),
+ ('\u{10db}', ['\u{1c9b}', '\u{0}', '\u{0}']), ('\u{10dc}', ['\u{1c9c}', '\u{0}', '\u{0}']),
+ ('\u{10dd}', ['\u{1c9d}', '\u{0}', '\u{0}']), ('\u{10de}', ['\u{1c9e}', '\u{0}', '\u{0}']),
+ ('\u{10df}', ['\u{1c9f}', '\u{0}', '\u{0}']), ('\u{10e0}', ['\u{1ca0}', '\u{0}', '\u{0}']),
+ ('\u{10e1}', ['\u{1ca1}', '\u{0}', '\u{0}']), ('\u{10e2}', ['\u{1ca2}', '\u{0}', '\u{0}']),
+ ('\u{10e3}', ['\u{1ca3}', '\u{0}', '\u{0}']), ('\u{10e4}', ['\u{1ca4}', '\u{0}', '\u{0}']),
+ ('\u{10e5}', ['\u{1ca5}', '\u{0}', '\u{0}']), ('\u{10e6}', ['\u{1ca6}', '\u{0}', '\u{0}']),
+ ('\u{10e7}', ['\u{1ca7}', '\u{0}', '\u{0}']), ('\u{10e8}', ['\u{1ca8}', '\u{0}', '\u{0}']),
+ ('\u{10e9}', ['\u{1ca9}', '\u{0}', '\u{0}']), ('\u{10ea}', ['\u{1caa}', '\u{0}', '\u{0}']),
+ ('\u{10eb}', ['\u{1cab}', '\u{0}', '\u{0}']), ('\u{10ec}', ['\u{1cac}', '\u{0}', '\u{0}']),
+ ('\u{10ed}', ['\u{1cad}', '\u{0}', '\u{0}']), ('\u{10ee}', ['\u{1cae}', '\u{0}', '\u{0}']),
+ ('\u{10ef}', ['\u{1caf}', '\u{0}', '\u{0}']), ('\u{10f0}', ['\u{1cb0}', '\u{0}', '\u{0}']),
+ ('\u{10f1}', ['\u{1cb1}', '\u{0}', '\u{0}']), ('\u{10f2}', ['\u{1cb2}', '\u{0}', '\u{0}']),
+ ('\u{10f3}', ['\u{1cb3}', '\u{0}', '\u{0}']), ('\u{10f4}', ['\u{1cb4}', '\u{0}', '\u{0}']),
+ ('\u{10f5}', ['\u{1cb5}', '\u{0}', '\u{0}']), ('\u{10f6}', ['\u{1cb6}', '\u{0}', '\u{0}']),
+ ('\u{10f7}', ['\u{1cb7}', '\u{0}', '\u{0}']), ('\u{10f8}', ['\u{1cb8}', '\u{0}', '\u{0}']),
+ ('\u{10f9}', ['\u{1cb9}', '\u{0}', '\u{0}']), ('\u{10fa}', ['\u{1cba}', '\u{0}', '\u{0}']),
+ ('\u{10fd}', ['\u{1cbd}', '\u{0}', '\u{0}']), ('\u{10fe}', ['\u{1cbe}', '\u{0}', '\u{0}']),
+ ('\u{10ff}', ['\u{1cbf}', '\u{0}', '\u{0}']), ('\u{13f8}', ['\u{13f0}', '\u{0}', '\u{0}']),
+ ('\u{13f9}', ['\u{13f1}', '\u{0}', '\u{0}']), ('\u{13fa}', ['\u{13f2}', '\u{0}', '\u{0}']),
+ ('\u{13fb}', ['\u{13f3}', '\u{0}', '\u{0}']), ('\u{13fc}', ['\u{13f4}', '\u{0}', '\u{0}']),
+ ('\u{13fd}', ['\u{13f5}', '\u{0}', '\u{0}']), ('\u{1c80}', ['\u{412}', '\u{0}', '\u{0}']),
+ ('\u{1c81}', ['\u{414}', '\u{0}', '\u{0}']), ('\u{1c82}', ['\u{41e}', '\u{0}', '\u{0}']),
+ ('\u{1c83}', ['\u{421}', '\u{0}', '\u{0}']), ('\u{1c84}', ['\u{422}', '\u{0}', '\u{0}']),
+ ('\u{1c85}', ['\u{422}', '\u{0}', '\u{0}']), ('\u{1c86}', ['\u{42a}', '\u{0}', '\u{0}']),
+ ('\u{1c87}', ['\u{462}', '\u{0}', '\u{0}']), ('\u{1c88}', ['\u{a64a}', '\u{0}', '\u{0}']),
+ ('\u{1d79}', ['\u{a77d}', '\u{0}', '\u{0}']), ('\u{1d7d}', ['\u{2c63}', '\u{0}', '\u{0}']),
+ ('\u{1d8e}', ['\u{a7c6}', '\u{0}', '\u{0}']), ('\u{1e01}', ['\u{1e00}', '\u{0}', '\u{0}']),
+ ('\u{1e03}', ['\u{1e02}', '\u{0}', '\u{0}']), ('\u{1e05}', ['\u{1e04}', '\u{0}', '\u{0}']),
+ ('\u{1e07}', ['\u{1e06}', '\u{0}', '\u{0}']), ('\u{1e09}', ['\u{1e08}', '\u{0}', '\u{0}']),
+ ('\u{1e0b}', ['\u{1e0a}', '\u{0}', '\u{0}']), ('\u{1e0d}', ['\u{1e0c}', '\u{0}', '\u{0}']),
+ ('\u{1e0f}', ['\u{1e0e}', '\u{0}', '\u{0}']), ('\u{1e11}', ['\u{1e10}', '\u{0}', '\u{0}']),
+ ('\u{1e13}', ['\u{1e12}', '\u{0}', '\u{0}']), ('\u{1e15}', ['\u{1e14}', '\u{0}', '\u{0}']),
+ ('\u{1e17}', ['\u{1e16}', '\u{0}', '\u{0}']), ('\u{1e19}', ['\u{1e18}', '\u{0}', '\u{0}']),
+ ('\u{1e1b}', ['\u{1e1a}', '\u{0}', '\u{0}']), ('\u{1e1d}', ['\u{1e1c}', '\u{0}', '\u{0}']),
+ ('\u{1e1f}', ['\u{1e1e}', '\u{0}', '\u{0}']), ('\u{1e21}', ['\u{1e20}', '\u{0}', '\u{0}']),
+ ('\u{1e23}', ['\u{1e22}', '\u{0}', '\u{0}']), ('\u{1e25}', ['\u{1e24}', '\u{0}', '\u{0}']),
+ ('\u{1e27}', ['\u{1e26}', '\u{0}', '\u{0}']), ('\u{1e29}', ['\u{1e28}', '\u{0}', '\u{0}']),
+ ('\u{1e2b}', ['\u{1e2a}', '\u{0}', '\u{0}']), ('\u{1e2d}', ['\u{1e2c}', '\u{0}', '\u{0}']),
+ ('\u{1e2f}', ['\u{1e2e}', '\u{0}', '\u{0}']), ('\u{1e31}', ['\u{1e30}', '\u{0}', '\u{0}']),
+ ('\u{1e33}', ['\u{1e32}', '\u{0}', '\u{0}']), ('\u{1e35}', ['\u{1e34}', '\u{0}', '\u{0}']),
+ ('\u{1e37}', ['\u{1e36}', '\u{0}', '\u{0}']), ('\u{1e39}', ['\u{1e38}', '\u{0}', '\u{0}']),
+ ('\u{1e3b}', ['\u{1e3a}', '\u{0}', '\u{0}']), ('\u{1e3d}', ['\u{1e3c}', '\u{0}', '\u{0}']),
+ ('\u{1e3f}', ['\u{1e3e}', '\u{0}', '\u{0}']), ('\u{1e41}', ['\u{1e40}', '\u{0}', '\u{0}']),
+ ('\u{1e43}', ['\u{1e42}', '\u{0}', '\u{0}']), ('\u{1e45}', ['\u{1e44}', '\u{0}', '\u{0}']),
+ ('\u{1e47}', ['\u{1e46}', '\u{0}', '\u{0}']), ('\u{1e49}', ['\u{1e48}', '\u{0}', '\u{0}']),
+ ('\u{1e4b}', ['\u{1e4a}', '\u{0}', '\u{0}']), ('\u{1e4d}', ['\u{1e4c}', '\u{0}', '\u{0}']),
+ ('\u{1e4f}', ['\u{1e4e}', '\u{0}', '\u{0}']), ('\u{1e51}', ['\u{1e50}', '\u{0}', '\u{0}']),
+ ('\u{1e53}', ['\u{1e52}', '\u{0}', '\u{0}']), ('\u{1e55}', ['\u{1e54}', '\u{0}', '\u{0}']),
+ ('\u{1e57}', ['\u{1e56}', '\u{0}', '\u{0}']), ('\u{1e59}', ['\u{1e58}', '\u{0}', '\u{0}']),
+ ('\u{1e5b}', ['\u{1e5a}', '\u{0}', '\u{0}']), ('\u{1e5d}', ['\u{1e5c}', '\u{0}', '\u{0}']),
+ ('\u{1e5f}', ['\u{1e5e}', '\u{0}', '\u{0}']), ('\u{1e61}', ['\u{1e60}', '\u{0}', '\u{0}']),
+ ('\u{1e63}', ['\u{1e62}', '\u{0}', '\u{0}']), ('\u{1e65}', ['\u{1e64}', '\u{0}', '\u{0}']),
+ ('\u{1e67}', ['\u{1e66}', '\u{0}', '\u{0}']), ('\u{1e69}', ['\u{1e68}', '\u{0}', '\u{0}']),
+ ('\u{1e6b}', ['\u{1e6a}', '\u{0}', '\u{0}']), ('\u{1e6d}', ['\u{1e6c}', '\u{0}', '\u{0}']),
+ ('\u{1e6f}', ['\u{1e6e}', '\u{0}', '\u{0}']), ('\u{1e71}', ['\u{1e70}', '\u{0}', '\u{0}']),
+ ('\u{1e73}', ['\u{1e72}', '\u{0}', '\u{0}']), ('\u{1e75}', ['\u{1e74}', '\u{0}', '\u{0}']),
+ ('\u{1e77}', ['\u{1e76}', '\u{0}', '\u{0}']), ('\u{1e79}', ['\u{1e78}', '\u{0}', '\u{0}']),
+ ('\u{1e7b}', ['\u{1e7a}', '\u{0}', '\u{0}']), ('\u{1e7d}', ['\u{1e7c}', '\u{0}', '\u{0}']),
+ ('\u{1e7f}', ['\u{1e7e}', '\u{0}', '\u{0}']), ('\u{1e81}', ['\u{1e80}', '\u{0}', '\u{0}']),
+ ('\u{1e83}', ['\u{1e82}', '\u{0}', '\u{0}']), ('\u{1e85}', ['\u{1e84}', '\u{0}', '\u{0}']),
+ ('\u{1e87}', ['\u{1e86}', '\u{0}', '\u{0}']), ('\u{1e89}', ['\u{1e88}', '\u{0}', '\u{0}']),
+ ('\u{1e8b}', ['\u{1e8a}', '\u{0}', '\u{0}']), ('\u{1e8d}', ['\u{1e8c}', '\u{0}', '\u{0}']),
+ ('\u{1e8f}', ['\u{1e8e}', '\u{0}', '\u{0}']), ('\u{1e91}', ['\u{1e90}', '\u{0}', '\u{0}']),
+ ('\u{1e93}', ['\u{1e92}', '\u{0}', '\u{0}']), ('\u{1e95}', ['\u{1e94}', '\u{0}', '\u{0}']),
+ ('\u{1e96}', ['H', '\u{331}', '\u{0}']), ('\u{1e97}', ['T', '\u{308}', '\u{0}']),
+ ('\u{1e98}', ['W', '\u{30a}', '\u{0}']), ('\u{1e99}', ['Y', '\u{30a}', '\u{0}']),
+ ('\u{1e9a}', ['A', '\u{2be}', '\u{0}']), ('\u{1e9b}', ['\u{1e60}', '\u{0}', '\u{0}']),
+ ('\u{1ea1}', ['\u{1ea0}', '\u{0}', '\u{0}']), ('\u{1ea3}', ['\u{1ea2}', '\u{0}', '\u{0}']),
+ ('\u{1ea5}', ['\u{1ea4}', '\u{0}', '\u{0}']), ('\u{1ea7}', ['\u{1ea6}', '\u{0}', '\u{0}']),
+ ('\u{1ea9}', ['\u{1ea8}', '\u{0}', '\u{0}']), ('\u{1eab}', ['\u{1eaa}', '\u{0}', '\u{0}']),
+ ('\u{1ead}', ['\u{1eac}', '\u{0}', '\u{0}']), ('\u{1eaf}', ['\u{1eae}', '\u{0}', '\u{0}']),
+ ('\u{1eb1}', ['\u{1eb0}', '\u{0}', '\u{0}']), ('\u{1eb3}', ['\u{1eb2}', '\u{0}', '\u{0}']),
+ ('\u{1eb5}', ['\u{1eb4}', '\u{0}', '\u{0}']), ('\u{1eb7}', ['\u{1eb6}', '\u{0}', '\u{0}']),
+ ('\u{1eb9}', ['\u{1eb8}', '\u{0}', '\u{0}']), ('\u{1ebb}', ['\u{1eba}', '\u{0}', '\u{0}']),
+ ('\u{1ebd}', ['\u{1ebc}', '\u{0}', '\u{0}']), ('\u{1ebf}', ['\u{1ebe}', '\u{0}', '\u{0}']),
+ ('\u{1ec1}', ['\u{1ec0}', '\u{0}', '\u{0}']), ('\u{1ec3}', ['\u{1ec2}', '\u{0}', '\u{0}']),
+ ('\u{1ec5}', ['\u{1ec4}', '\u{0}', '\u{0}']), ('\u{1ec7}', ['\u{1ec6}', '\u{0}', '\u{0}']),
+ ('\u{1ec9}', ['\u{1ec8}', '\u{0}', '\u{0}']), ('\u{1ecb}', ['\u{1eca}', '\u{0}', '\u{0}']),
+ ('\u{1ecd}', ['\u{1ecc}', '\u{0}', '\u{0}']), ('\u{1ecf}', ['\u{1ece}', '\u{0}', '\u{0}']),
+ ('\u{1ed1}', ['\u{1ed0}', '\u{0}', '\u{0}']), ('\u{1ed3}', ['\u{1ed2}', '\u{0}', '\u{0}']),
+ ('\u{1ed5}', ['\u{1ed4}', '\u{0}', '\u{0}']), ('\u{1ed7}', ['\u{1ed6}', '\u{0}', '\u{0}']),
+ ('\u{1ed9}', ['\u{1ed8}', '\u{0}', '\u{0}']), ('\u{1edb}', ['\u{1eda}', '\u{0}', '\u{0}']),
+ ('\u{1edd}', ['\u{1edc}', '\u{0}', '\u{0}']), ('\u{1edf}', ['\u{1ede}', '\u{0}', '\u{0}']),
+ ('\u{1ee1}', ['\u{1ee0}', '\u{0}', '\u{0}']), ('\u{1ee3}', ['\u{1ee2}', '\u{0}', '\u{0}']),
+ ('\u{1ee5}', ['\u{1ee4}', '\u{0}', '\u{0}']), ('\u{1ee7}', ['\u{1ee6}', '\u{0}', '\u{0}']),
+ ('\u{1ee9}', ['\u{1ee8}', '\u{0}', '\u{0}']), ('\u{1eeb}', ['\u{1eea}', '\u{0}', '\u{0}']),
+ ('\u{1eed}', ['\u{1eec}', '\u{0}', '\u{0}']), ('\u{1eef}', ['\u{1eee}', '\u{0}', '\u{0}']),
+ ('\u{1ef1}', ['\u{1ef0}', '\u{0}', '\u{0}']), ('\u{1ef3}', ['\u{1ef2}', '\u{0}', '\u{0}']),
+ ('\u{1ef5}', ['\u{1ef4}', '\u{0}', '\u{0}']), ('\u{1ef7}', ['\u{1ef6}', '\u{0}', '\u{0}']),
+ ('\u{1ef9}', ['\u{1ef8}', '\u{0}', '\u{0}']), ('\u{1efb}', ['\u{1efa}', '\u{0}', '\u{0}']),
+ ('\u{1efd}', ['\u{1efc}', '\u{0}', '\u{0}']), ('\u{1eff}', ['\u{1efe}', '\u{0}', '\u{0}']),
+ ('\u{1f00}', ['\u{1f08}', '\u{0}', '\u{0}']), ('\u{1f01}', ['\u{1f09}', '\u{0}', '\u{0}']),
+ ('\u{1f02}', ['\u{1f0a}', '\u{0}', '\u{0}']), ('\u{1f03}', ['\u{1f0b}', '\u{0}', '\u{0}']),
+ ('\u{1f04}', ['\u{1f0c}', '\u{0}', '\u{0}']), ('\u{1f05}', ['\u{1f0d}', '\u{0}', '\u{0}']),
+ ('\u{1f06}', ['\u{1f0e}', '\u{0}', '\u{0}']), ('\u{1f07}', ['\u{1f0f}', '\u{0}', '\u{0}']),
+ ('\u{1f10}', ['\u{1f18}', '\u{0}', '\u{0}']), ('\u{1f11}', ['\u{1f19}', '\u{0}', '\u{0}']),
+ ('\u{1f12}', ['\u{1f1a}', '\u{0}', '\u{0}']), ('\u{1f13}', ['\u{1f1b}', '\u{0}', '\u{0}']),
+ ('\u{1f14}', ['\u{1f1c}', '\u{0}', '\u{0}']), ('\u{1f15}', ['\u{1f1d}', '\u{0}', '\u{0}']),
+ ('\u{1f20}', ['\u{1f28}', '\u{0}', '\u{0}']), ('\u{1f21}', ['\u{1f29}', '\u{0}', '\u{0}']),
+ ('\u{1f22}', ['\u{1f2a}', '\u{0}', '\u{0}']), ('\u{1f23}', ['\u{1f2b}', '\u{0}', '\u{0}']),
+ ('\u{1f24}', ['\u{1f2c}', '\u{0}', '\u{0}']), ('\u{1f25}', ['\u{1f2d}', '\u{0}', '\u{0}']),
+ ('\u{1f26}', ['\u{1f2e}', '\u{0}', '\u{0}']), ('\u{1f27}', ['\u{1f2f}', '\u{0}', '\u{0}']),
+ ('\u{1f30}', ['\u{1f38}', '\u{0}', '\u{0}']), ('\u{1f31}', ['\u{1f39}', '\u{0}', '\u{0}']),
+ ('\u{1f32}', ['\u{1f3a}', '\u{0}', '\u{0}']), ('\u{1f33}', ['\u{1f3b}', '\u{0}', '\u{0}']),
+ ('\u{1f34}', ['\u{1f3c}', '\u{0}', '\u{0}']), ('\u{1f35}', ['\u{1f3d}', '\u{0}', '\u{0}']),
+ ('\u{1f36}', ['\u{1f3e}', '\u{0}', '\u{0}']), ('\u{1f37}', ['\u{1f3f}', '\u{0}', '\u{0}']),
+ ('\u{1f40}', ['\u{1f48}', '\u{0}', '\u{0}']), ('\u{1f41}', ['\u{1f49}', '\u{0}', '\u{0}']),
+ ('\u{1f42}', ['\u{1f4a}', '\u{0}', '\u{0}']), ('\u{1f43}', ['\u{1f4b}', '\u{0}', '\u{0}']),
+ ('\u{1f44}', ['\u{1f4c}', '\u{0}', '\u{0}']), ('\u{1f45}', ['\u{1f4d}', '\u{0}', '\u{0}']),
+ ('\u{1f50}', ['\u{3a5}', '\u{313}', '\u{0}']), ('\u{1f51}', ['\u{1f59}', '\u{0}', '\u{0}']),
+ ('\u{1f52}', ['\u{3a5}', '\u{313}', '\u{300}']),
+ ('\u{1f53}', ['\u{1f5b}', '\u{0}', '\u{0}']),
+ ('\u{1f54}', ['\u{3a5}', '\u{313}', '\u{301}']),
+ ('\u{1f55}', ['\u{1f5d}', '\u{0}', '\u{0}']),
+ ('\u{1f56}', ['\u{3a5}', '\u{313}', '\u{342}']),
+ ('\u{1f57}', ['\u{1f5f}', '\u{0}', '\u{0}']), ('\u{1f60}', ['\u{1f68}', '\u{0}', '\u{0}']),
+ ('\u{1f61}', ['\u{1f69}', '\u{0}', '\u{0}']), ('\u{1f62}', ['\u{1f6a}', '\u{0}', '\u{0}']),
+ ('\u{1f63}', ['\u{1f6b}', '\u{0}', '\u{0}']), ('\u{1f64}', ['\u{1f6c}', '\u{0}', '\u{0}']),
+ ('\u{1f65}', ['\u{1f6d}', '\u{0}', '\u{0}']), ('\u{1f66}', ['\u{1f6e}', '\u{0}', '\u{0}']),
+ ('\u{1f67}', ['\u{1f6f}', '\u{0}', '\u{0}']), ('\u{1f70}', ['\u{1fba}', '\u{0}', '\u{0}']),
+ ('\u{1f71}', ['\u{1fbb}', '\u{0}', '\u{0}']), ('\u{1f72}', ['\u{1fc8}', '\u{0}', '\u{0}']),
+ ('\u{1f73}', ['\u{1fc9}', '\u{0}', '\u{0}']), ('\u{1f74}', ['\u{1fca}', '\u{0}', '\u{0}']),
+ ('\u{1f75}', ['\u{1fcb}', '\u{0}', '\u{0}']), ('\u{1f76}', ['\u{1fda}', '\u{0}', '\u{0}']),
+ ('\u{1f77}', ['\u{1fdb}', '\u{0}', '\u{0}']), ('\u{1f78}', ['\u{1ff8}', '\u{0}', '\u{0}']),
+ ('\u{1f79}', ['\u{1ff9}', '\u{0}', '\u{0}']), ('\u{1f7a}', ['\u{1fea}', '\u{0}', '\u{0}']),
+ ('\u{1f7b}', ['\u{1feb}', '\u{0}', '\u{0}']), ('\u{1f7c}', ['\u{1ffa}', '\u{0}', '\u{0}']),
+ ('\u{1f7d}', ['\u{1ffb}', '\u{0}', '\u{0}']),
+ ('\u{1f80}', ['\u{1f08}', '\u{399}', '\u{0}']),
+ ('\u{1f81}', ['\u{1f09}', '\u{399}', '\u{0}']),
+ ('\u{1f82}', ['\u{1f0a}', '\u{399}', '\u{0}']),
+ ('\u{1f83}', ['\u{1f0b}', '\u{399}', '\u{0}']),
+ ('\u{1f84}', ['\u{1f0c}', '\u{399}', '\u{0}']),
+ ('\u{1f85}', ['\u{1f0d}', '\u{399}', '\u{0}']),
+ ('\u{1f86}', ['\u{1f0e}', '\u{399}', '\u{0}']),
+ ('\u{1f87}', ['\u{1f0f}', '\u{399}', '\u{0}']),
+ ('\u{1f88}', ['\u{1f08}', '\u{399}', '\u{0}']),
+ ('\u{1f89}', ['\u{1f09}', '\u{399}', '\u{0}']),
+ ('\u{1f8a}', ['\u{1f0a}', '\u{399}', '\u{0}']),
+ ('\u{1f8b}', ['\u{1f0b}', '\u{399}', '\u{0}']),
+ ('\u{1f8c}', ['\u{1f0c}', '\u{399}', '\u{0}']),
+ ('\u{1f8d}', ['\u{1f0d}', '\u{399}', '\u{0}']),
+ ('\u{1f8e}', ['\u{1f0e}', '\u{399}', '\u{0}']),
+ ('\u{1f8f}', ['\u{1f0f}', '\u{399}', '\u{0}']),
+ ('\u{1f90}', ['\u{1f28}', '\u{399}', '\u{0}']),
+ ('\u{1f91}', ['\u{1f29}', '\u{399}', '\u{0}']),
+ ('\u{1f92}', ['\u{1f2a}', '\u{399}', '\u{0}']),
+ ('\u{1f93}', ['\u{1f2b}', '\u{399}', '\u{0}']),
+ ('\u{1f94}', ['\u{1f2c}', '\u{399}', '\u{0}']),
+ ('\u{1f95}', ['\u{1f2d}', '\u{399}', '\u{0}']),
+ ('\u{1f96}', ['\u{1f2e}', '\u{399}', '\u{0}']),
+ ('\u{1f97}', ['\u{1f2f}', '\u{399}', '\u{0}']),
+ ('\u{1f98}', ['\u{1f28}', '\u{399}', '\u{0}']),
+ ('\u{1f99}', ['\u{1f29}', '\u{399}', '\u{0}']),
+ ('\u{1f9a}', ['\u{1f2a}', '\u{399}', '\u{0}']),
+ ('\u{1f9b}', ['\u{1f2b}', '\u{399}', '\u{0}']),
+ ('\u{1f9c}', ['\u{1f2c}', '\u{399}', '\u{0}']),
+ ('\u{1f9d}', ['\u{1f2d}', '\u{399}', '\u{0}']),
+ ('\u{1f9e}', ['\u{1f2e}', '\u{399}', '\u{0}']),
+ ('\u{1f9f}', ['\u{1f2f}', '\u{399}', '\u{0}']),
+ ('\u{1fa0}', ['\u{1f68}', '\u{399}', '\u{0}']),
+ ('\u{1fa1}', ['\u{1f69}', '\u{399}', '\u{0}']),
+ ('\u{1fa2}', ['\u{1f6a}', '\u{399}', '\u{0}']),
+ ('\u{1fa3}', ['\u{1f6b}', '\u{399}', '\u{0}']),
+ ('\u{1fa4}', ['\u{1f6c}', '\u{399}', '\u{0}']),
+ ('\u{1fa5}', ['\u{1f6d}', '\u{399}', '\u{0}']),
+ ('\u{1fa6}', ['\u{1f6e}', '\u{399}', '\u{0}']),
+ ('\u{1fa7}', ['\u{1f6f}', '\u{399}', '\u{0}']),
+ ('\u{1fa8}', ['\u{1f68}', '\u{399}', '\u{0}']),
+ ('\u{1fa9}', ['\u{1f69}', '\u{399}', '\u{0}']),
+ ('\u{1faa}', ['\u{1f6a}', '\u{399}', '\u{0}']),
+ ('\u{1fab}', ['\u{1f6b}', '\u{399}', '\u{0}']),
+ ('\u{1fac}', ['\u{1f6c}', '\u{399}', '\u{0}']),
+ ('\u{1fad}', ['\u{1f6d}', '\u{399}', '\u{0}']),
+ ('\u{1fae}', ['\u{1f6e}', '\u{399}', '\u{0}']),
+ ('\u{1faf}', ['\u{1f6f}', '\u{399}', '\u{0}']),
+ ('\u{1fb0}', ['\u{1fb8}', '\u{0}', '\u{0}']), ('\u{1fb1}', ['\u{1fb9}', '\u{0}', '\u{0}']),
+ ('\u{1fb2}', ['\u{1fba}', '\u{399}', '\u{0}']),
+ ('\u{1fb3}', ['\u{391}', '\u{399}', '\u{0}']),
+ ('\u{1fb4}', ['\u{386}', '\u{399}', '\u{0}']),
+ ('\u{1fb6}', ['\u{391}', '\u{342}', '\u{0}']),
+ ('\u{1fb7}', ['\u{391}', '\u{342}', '\u{399}']),
+ ('\u{1fbc}', ['\u{391}', '\u{399}', '\u{0}']), ('\u{1fbe}', ['\u{399}', '\u{0}', '\u{0}']),
+ ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\u{0}']),
+ ('\u{1fc3}', ['\u{397}', '\u{399}', '\u{0}']),
+ ('\u{1fc4}', ['\u{389}', '\u{399}', '\u{0}']),
+ ('\u{1fc6}', ['\u{397}', '\u{342}', '\u{0}']),
+ ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']),
+ ('\u{1fcc}', ['\u{397}', '\u{399}', '\u{0}']), ('\u{1fd0}', ['\u{1fd8}', '\u{0}', '\u{0}']),
+ ('\u{1fd1}', ['\u{1fd9}', '\u{0}', '\u{0}']),
+ ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']),
+ ('\u{1fd3}', ['\u{399}', '\u{308}', '\u{301}']),
+ ('\u{1fd6}', ['\u{399}', '\u{342}', '\u{0}']),
+ ('\u{1fd7}', ['\u{399}', '\u{308}', '\u{342}']),
+ ('\u{1fe0}', ['\u{1fe8}', '\u{0}', '\u{0}']), ('\u{1fe1}', ['\u{1fe9}', '\u{0}', '\u{0}']),
+ ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']),
+ ('\u{1fe3}', ['\u{3a5}', '\u{308}', '\u{301}']),
+ ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\u{0}']), ('\u{1fe5}', ['\u{1fec}', '\u{0}', '\u{0}']),
+ ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\u{0}']),
+ ('\u{1fe7}', ['\u{3a5}', '\u{308}', '\u{342}']),
+ ('\u{1ff2}', ['\u{1ffa}', '\u{399}', '\u{0}']),
+ ('\u{1ff3}', ['\u{3a9}', '\u{399}', '\u{0}']),
+ ('\u{1ff4}', ['\u{38f}', '\u{399}', '\u{0}']),
+ ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\u{0}']),
+ ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{399}']),
+ ('\u{1ffc}', ['\u{3a9}', '\u{399}', '\u{0}']), ('\u{214e}', ['\u{2132}', '\u{0}', '\u{0}']),
+ ('\u{2170}', ['\u{2160}', '\u{0}', '\u{0}']), ('\u{2171}', ['\u{2161}', '\u{0}', '\u{0}']),
+ ('\u{2172}', ['\u{2162}', '\u{0}', '\u{0}']), ('\u{2173}', ['\u{2163}', '\u{0}', '\u{0}']),
+ ('\u{2174}', ['\u{2164}', '\u{0}', '\u{0}']), ('\u{2175}', ['\u{2165}', '\u{0}', '\u{0}']),
+ ('\u{2176}', ['\u{2166}', '\u{0}', '\u{0}']), ('\u{2177}', ['\u{2167}', '\u{0}', '\u{0}']),
+ ('\u{2178}', ['\u{2168}', '\u{0}', '\u{0}']), ('\u{2179}', ['\u{2169}', '\u{0}', '\u{0}']),
+ ('\u{217a}', ['\u{216a}', '\u{0}', '\u{0}']), ('\u{217b}', ['\u{216b}', '\u{0}', '\u{0}']),
+ ('\u{217c}', ['\u{216c}', '\u{0}', '\u{0}']), ('\u{217d}', ['\u{216d}', '\u{0}', '\u{0}']),
+ ('\u{217e}', ['\u{216e}', '\u{0}', '\u{0}']), ('\u{217f}', ['\u{216f}', '\u{0}', '\u{0}']),
+ ('\u{2184}', ['\u{2183}', '\u{0}', '\u{0}']), ('\u{24d0}', ['\u{24b6}', '\u{0}', '\u{0}']),
+ ('\u{24d1}', ['\u{24b7}', '\u{0}', '\u{0}']), ('\u{24d2}', ['\u{24b8}', '\u{0}', '\u{0}']),
+ ('\u{24d3}', ['\u{24b9}', '\u{0}', '\u{0}']), ('\u{24d4}', ['\u{24ba}', '\u{0}', '\u{0}']),
+ ('\u{24d5}', ['\u{24bb}', '\u{0}', '\u{0}']), ('\u{24d6}', ['\u{24bc}', '\u{0}', '\u{0}']),
+ ('\u{24d7}', ['\u{24bd}', '\u{0}', '\u{0}']), ('\u{24d8}', ['\u{24be}', '\u{0}', '\u{0}']),
+ ('\u{24d9}', ['\u{24bf}', '\u{0}', '\u{0}']), ('\u{24da}', ['\u{24c0}', '\u{0}', '\u{0}']),
+ ('\u{24db}', ['\u{24c1}', '\u{0}', '\u{0}']), ('\u{24dc}', ['\u{24c2}', '\u{0}', '\u{0}']),
+ ('\u{24dd}', ['\u{24c3}', '\u{0}', '\u{0}']), ('\u{24de}', ['\u{24c4}', '\u{0}', '\u{0}']),
+ ('\u{24df}', ['\u{24c5}', '\u{0}', '\u{0}']), ('\u{24e0}', ['\u{24c6}', '\u{0}', '\u{0}']),
+ ('\u{24e1}', ['\u{24c7}', '\u{0}', '\u{0}']), ('\u{24e2}', ['\u{24c8}', '\u{0}', '\u{0}']),
+ ('\u{24e3}', ['\u{24c9}', '\u{0}', '\u{0}']), ('\u{24e4}', ['\u{24ca}', '\u{0}', '\u{0}']),
+ ('\u{24e5}', ['\u{24cb}', '\u{0}', '\u{0}']), ('\u{24e6}', ['\u{24cc}', '\u{0}', '\u{0}']),
+ ('\u{24e7}', ['\u{24cd}', '\u{0}', '\u{0}']), ('\u{24e8}', ['\u{24ce}', '\u{0}', '\u{0}']),
+ ('\u{24e9}', ['\u{24cf}', '\u{0}', '\u{0}']), ('\u{2c30}', ['\u{2c00}', '\u{0}', '\u{0}']),
+ ('\u{2c31}', ['\u{2c01}', '\u{0}', '\u{0}']), ('\u{2c32}', ['\u{2c02}', '\u{0}', '\u{0}']),
+ ('\u{2c33}', ['\u{2c03}', '\u{0}', '\u{0}']), ('\u{2c34}', ['\u{2c04}', '\u{0}', '\u{0}']),
+ ('\u{2c35}', ['\u{2c05}', '\u{0}', '\u{0}']), ('\u{2c36}', ['\u{2c06}', '\u{0}', '\u{0}']),
+ ('\u{2c37}', ['\u{2c07}', '\u{0}', '\u{0}']), ('\u{2c38}', ['\u{2c08}', '\u{0}', '\u{0}']),
+ ('\u{2c39}', ['\u{2c09}', '\u{0}', '\u{0}']), ('\u{2c3a}', ['\u{2c0a}', '\u{0}', '\u{0}']),
+ ('\u{2c3b}', ['\u{2c0b}', '\u{0}', '\u{0}']), ('\u{2c3c}', ['\u{2c0c}', '\u{0}', '\u{0}']),
+ ('\u{2c3d}', ['\u{2c0d}', '\u{0}', '\u{0}']), ('\u{2c3e}', ['\u{2c0e}', '\u{0}', '\u{0}']),
+ ('\u{2c3f}', ['\u{2c0f}', '\u{0}', '\u{0}']), ('\u{2c40}', ['\u{2c10}', '\u{0}', '\u{0}']),
+ ('\u{2c41}', ['\u{2c11}', '\u{0}', '\u{0}']), ('\u{2c42}', ['\u{2c12}', '\u{0}', '\u{0}']),
+ ('\u{2c43}', ['\u{2c13}', '\u{0}', '\u{0}']), ('\u{2c44}', ['\u{2c14}', '\u{0}', '\u{0}']),
+ ('\u{2c45}', ['\u{2c15}', '\u{0}', '\u{0}']), ('\u{2c46}', ['\u{2c16}', '\u{0}', '\u{0}']),
+ ('\u{2c47}', ['\u{2c17}', '\u{0}', '\u{0}']), ('\u{2c48}', ['\u{2c18}', '\u{0}', '\u{0}']),
+ ('\u{2c49}', ['\u{2c19}', '\u{0}', '\u{0}']), ('\u{2c4a}', ['\u{2c1a}', '\u{0}', '\u{0}']),
+ ('\u{2c4b}', ['\u{2c1b}', '\u{0}', '\u{0}']), ('\u{2c4c}', ['\u{2c1c}', '\u{0}', '\u{0}']),
+ ('\u{2c4d}', ['\u{2c1d}', '\u{0}', '\u{0}']), ('\u{2c4e}', ['\u{2c1e}', '\u{0}', '\u{0}']),
+ ('\u{2c4f}', ['\u{2c1f}', '\u{0}', '\u{0}']), ('\u{2c50}', ['\u{2c20}', '\u{0}', '\u{0}']),
+ ('\u{2c51}', ['\u{2c21}', '\u{0}', '\u{0}']), ('\u{2c52}', ['\u{2c22}', '\u{0}', '\u{0}']),
+ ('\u{2c53}', ['\u{2c23}', '\u{0}', '\u{0}']), ('\u{2c54}', ['\u{2c24}', '\u{0}', '\u{0}']),
+ ('\u{2c55}', ['\u{2c25}', '\u{0}', '\u{0}']), ('\u{2c56}', ['\u{2c26}', '\u{0}', '\u{0}']),
+ ('\u{2c57}', ['\u{2c27}', '\u{0}', '\u{0}']), ('\u{2c58}', ['\u{2c28}', '\u{0}', '\u{0}']),
+ ('\u{2c59}', ['\u{2c29}', '\u{0}', '\u{0}']), ('\u{2c5a}', ['\u{2c2a}', '\u{0}', '\u{0}']),
+ ('\u{2c5b}', ['\u{2c2b}', '\u{0}', '\u{0}']), ('\u{2c5c}', ['\u{2c2c}', '\u{0}', '\u{0}']),
+ ('\u{2c5d}', ['\u{2c2d}', '\u{0}', '\u{0}']), ('\u{2c5e}', ['\u{2c2e}', '\u{0}', '\u{0}']),
+ ('\u{2c61}', ['\u{2c60}', '\u{0}', '\u{0}']), ('\u{2c65}', ['\u{23a}', '\u{0}', '\u{0}']),
+ ('\u{2c66}', ['\u{23e}', '\u{0}', '\u{0}']), ('\u{2c68}', ['\u{2c67}', '\u{0}', '\u{0}']),
+ ('\u{2c6a}', ['\u{2c69}', '\u{0}', '\u{0}']), ('\u{2c6c}', ['\u{2c6b}', '\u{0}', '\u{0}']),
+ ('\u{2c73}', ['\u{2c72}', '\u{0}', '\u{0}']), ('\u{2c76}', ['\u{2c75}', '\u{0}', '\u{0}']),
+ ('\u{2c81}', ['\u{2c80}', '\u{0}', '\u{0}']), ('\u{2c83}', ['\u{2c82}', '\u{0}', '\u{0}']),
+ ('\u{2c85}', ['\u{2c84}', '\u{0}', '\u{0}']), ('\u{2c87}', ['\u{2c86}', '\u{0}', '\u{0}']),
+ ('\u{2c89}', ['\u{2c88}', '\u{0}', '\u{0}']), ('\u{2c8b}', ['\u{2c8a}', '\u{0}', '\u{0}']),
+ ('\u{2c8d}', ['\u{2c8c}', '\u{0}', '\u{0}']), ('\u{2c8f}', ['\u{2c8e}', '\u{0}', '\u{0}']),
+ ('\u{2c91}', ['\u{2c90}', '\u{0}', '\u{0}']), ('\u{2c93}', ['\u{2c92}', '\u{0}', '\u{0}']),
+ ('\u{2c95}', ['\u{2c94}', '\u{0}', '\u{0}']), ('\u{2c97}', ['\u{2c96}', '\u{0}', '\u{0}']),
+ ('\u{2c99}', ['\u{2c98}', '\u{0}', '\u{0}']), ('\u{2c9b}', ['\u{2c9a}', '\u{0}', '\u{0}']),
+ ('\u{2c9d}', ['\u{2c9c}', '\u{0}', '\u{0}']), ('\u{2c9f}', ['\u{2c9e}', '\u{0}', '\u{0}']),
+ ('\u{2ca1}', ['\u{2ca0}', '\u{0}', '\u{0}']), ('\u{2ca3}', ['\u{2ca2}', '\u{0}', '\u{0}']),
+ ('\u{2ca5}', ['\u{2ca4}', '\u{0}', '\u{0}']), ('\u{2ca7}', ['\u{2ca6}', '\u{0}', '\u{0}']),
+ ('\u{2ca9}', ['\u{2ca8}', '\u{0}', '\u{0}']), ('\u{2cab}', ['\u{2caa}', '\u{0}', '\u{0}']),
+ ('\u{2cad}', ['\u{2cac}', '\u{0}', '\u{0}']), ('\u{2caf}', ['\u{2cae}', '\u{0}', '\u{0}']),
+ ('\u{2cb1}', ['\u{2cb0}', '\u{0}', '\u{0}']), ('\u{2cb3}', ['\u{2cb2}', '\u{0}', '\u{0}']),
+ ('\u{2cb5}', ['\u{2cb4}', '\u{0}', '\u{0}']), ('\u{2cb7}', ['\u{2cb6}', '\u{0}', '\u{0}']),
+ ('\u{2cb9}', ['\u{2cb8}', '\u{0}', '\u{0}']), ('\u{2cbb}', ['\u{2cba}', '\u{0}', '\u{0}']),
+ ('\u{2cbd}', ['\u{2cbc}', '\u{0}', '\u{0}']), ('\u{2cbf}', ['\u{2cbe}', '\u{0}', '\u{0}']),
+ ('\u{2cc1}', ['\u{2cc0}', '\u{0}', '\u{0}']), ('\u{2cc3}', ['\u{2cc2}', '\u{0}', '\u{0}']),
+ ('\u{2cc5}', ['\u{2cc4}', '\u{0}', '\u{0}']), ('\u{2cc7}', ['\u{2cc6}', '\u{0}', '\u{0}']),
+ ('\u{2cc9}', ['\u{2cc8}', '\u{0}', '\u{0}']), ('\u{2ccb}', ['\u{2cca}', '\u{0}', '\u{0}']),
+ ('\u{2ccd}', ['\u{2ccc}', '\u{0}', '\u{0}']), ('\u{2ccf}', ['\u{2cce}', '\u{0}', '\u{0}']),
+ ('\u{2cd1}', ['\u{2cd0}', '\u{0}', '\u{0}']), ('\u{2cd3}', ['\u{2cd2}', '\u{0}', '\u{0}']),
+ ('\u{2cd5}', ['\u{2cd4}', '\u{0}', '\u{0}']), ('\u{2cd7}', ['\u{2cd6}', '\u{0}', '\u{0}']),
+ ('\u{2cd9}', ['\u{2cd8}', '\u{0}', '\u{0}']), ('\u{2cdb}', ['\u{2cda}', '\u{0}', '\u{0}']),
+ ('\u{2cdd}', ['\u{2cdc}', '\u{0}', '\u{0}']), ('\u{2cdf}', ['\u{2cde}', '\u{0}', '\u{0}']),
+ ('\u{2ce1}', ['\u{2ce0}', '\u{0}', '\u{0}']), ('\u{2ce3}', ['\u{2ce2}', '\u{0}', '\u{0}']),
+ ('\u{2cec}', ['\u{2ceb}', '\u{0}', '\u{0}']), ('\u{2cee}', ['\u{2ced}', '\u{0}', '\u{0}']),
+ ('\u{2cf3}', ['\u{2cf2}', '\u{0}', '\u{0}']), ('\u{2d00}', ['\u{10a0}', '\u{0}', '\u{0}']),
+ ('\u{2d01}', ['\u{10a1}', '\u{0}', '\u{0}']), ('\u{2d02}', ['\u{10a2}', '\u{0}', '\u{0}']),
+ ('\u{2d03}', ['\u{10a3}', '\u{0}', '\u{0}']), ('\u{2d04}', ['\u{10a4}', '\u{0}', '\u{0}']),
+ ('\u{2d05}', ['\u{10a5}', '\u{0}', '\u{0}']), ('\u{2d06}', ['\u{10a6}', '\u{0}', '\u{0}']),
+ ('\u{2d07}', ['\u{10a7}', '\u{0}', '\u{0}']), ('\u{2d08}', ['\u{10a8}', '\u{0}', '\u{0}']),
+ ('\u{2d09}', ['\u{10a9}', '\u{0}', '\u{0}']), ('\u{2d0a}', ['\u{10aa}', '\u{0}', '\u{0}']),
+ ('\u{2d0b}', ['\u{10ab}', '\u{0}', '\u{0}']), ('\u{2d0c}', ['\u{10ac}', '\u{0}', '\u{0}']),
+ ('\u{2d0d}', ['\u{10ad}', '\u{0}', '\u{0}']), ('\u{2d0e}', ['\u{10ae}', '\u{0}', '\u{0}']),
+ ('\u{2d0f}', ['\u{10af}', '\u{0}', '\u{0}']), ('\u{2d10}', ['\u{10b0}', '\u{0}', '\u{0}']),
+ ('\u{2d11}', ['\u{10b1}', '\u{0}', '\u{0}']), ('\u{2d12}', ['\u{10b2}', '\u{0}', '\u{0}']),
+ ('\u{2d13}', ['\u{10b3}', '\u{0}', '\u{0}']), ('\u{2d14}', ['\u{10b4}', '\u{0}', '\u{0}']),
+ ('\u{2d15}', ['\u{10b5}', '\u{0}', '\u{0}']), ('\u{2d16}', ['\u{10b6}', '\u{0}', '\u{0}']),
+ ('\u{2d17}', ['\u{10b7}', '\u{0}', '\u{0}']), ('\u{2d18}', ['\u{10b8}', '\u{0}', '\u{0}']),
+ ('\u{2d19}', ['\u{10b9}', '\u{0}', '\u{0}']), ('\u{2d1a}', ['\u{10ba}', '\u{0}', '\u{0}']),
+ ('\u{2d1b}', ['\u{10bb}', '\u{0}', '\u{0}']), ('\u{2d1c}', ['\u{10bc}', '\u{0}', '\u{0}']),
+ ('\u{2d1d}', ['\u{10bd}', '\u{0}', '\u{0}']), ('\u{2d1e}', ['\u{10be}', '\u{0}', '\u{0}']),
+ ('\u{2d1f}', ['\u{10bf}', '\u{0}', '\u{0}']), ('\u{2d20}', ['\u{10c0}', '\u{0}', '\u{0}']),
+ ('\u{2d21}', ['\u{10c1}', '\u{0}', '\u{0}']), ('\u{2d22}', ['\u{10c2}', '\u{0}', '\u{0}']),
+ ('\u{2d23}', ['\u{10c3}', '\u{0}', '\u{0}']), ('\u{2d24}', ['\u{10c4}', '\u{0}', '\u{0}']),
+ ('\u{2d25}', ['\u{10c5}', '\u{0}', '\u{0}']), ('\u{2d27}', ['\u{10c7}', '\u{0}', '\u{0}']),
+ ('\u{2d2d}', ['\u{10cd}', '\u{0}', '\u{0}']), ('\u{a641}', ['\u{a640}', '\u{0}', '\u{0}']),
+ ('\u{a643}', ['\u{a642}', '\u{0}', '\u{0}']), ('\u{a645}', ['\u{a644}', '\u{0}', '\u{0}']),
+ ('\u{a647}', ['\u{a646}', '\u{0}', '\u{0}']), ('\u{a649}', ['\u{a648}', '\u{0}', '\u{0}']),
+ ('\u{a64b}', ['\u{a64a}', '\u{0}', '\u{0}']), ('\u{a64d}', ['\u{a64c}', '\u{0}', '\u{0}']),
+ ('\u{a64f}', ['\u{a64e}', '\u{0}', '\u{0}']), ('\u{a651}', ['\u{a650}', '\u{0}', '\u{0}']),
+ ('\u{a653}', ['\u{a652}', '\u{0}', '\u{0}']), ('\u{a655}', ['\u{a654}', '\u{0}', '\u{0}']),
+ ('\u{a657}', ['\u{a656}', '\u{0}', '\u{0}']), ('\u{a659}', ['\u{a658}', '\u{0}', '\u{0}']),
+ ('\u{a65b}', ['\u{a65a}', '\u{0}', '\u{0}']), ('\u{a65d}', ['\u{a65c}', '\u{0}', '\u{0}']),
+ ('\u{a65f}', ['\u{a65e}', '\u{0}', '\u{0}']), ('\u{a661}', ['\u{a660}', '\u{0}', '\u{0}']),
+ ('\u{a663}', ['\u{a662}', '\u{0}', '\u{0}']), ('\u{a665}', ['\u{a664}', '\u{0}', '\u{0}']),
+ ('\u{a667}', ['\u{a666}', '\u{0}', '\u{0}']), ('\u{a669}', ['\u{a668}', '\u{0}', '\u{0}']),
+ ('\u{a66b}', ['\u{a66a}', '\u{0}', '\u{0}']), ('\u{a66d}', ['\u{a66c}', '\u{0}', '\u{0}']),
+ ('\u{a681}', ['\u{a680}', '\u{0}', '\u{0}']), ('\u{a683}', ['\u{a682}', '\u{0}', '\u{0}']),
+ ('\u{a685}', ['\u{a684}', '\u{0}', '\u{0}']), ('\u{a687}', ['\u{a686}', '\u{0}', '\u{0}']),
+ ('\u{a689}', ['\u{a688}', '\u{0}', '\u{0}']), ('\u{a68b}', ['\u{a68a}', '\u{0}', '\u{0}']),
+ ('\u{a68d}', ['\u{a68c}', '\u{0}', '\u{0}']), ('\u{a68f}', ['\u{a68e}', '\u{0}', '\u{0}']),
+ ('\u{a691}', ['\u{a690}', '\u{0}', '\u{0}']), ('\u{a693}', ['\u{a692}', '\u{0}', '\u{0}']),
+ ('\u{a695}', ['\u{a694}', '\u{0}', '\u{0}']), ('\u{a697}', ['\u{a696}', '\u{0}', '\u{0}']),
+ ('\u{a699}', ['\u{a698}', '\u{0}', '\u{0}']), ('\u{a69b}', ['\u{a69a}', '\u{0}', '\u{0}']),
+ ('\u{a723}', ['\u{a722}', '\u{0}', '\u{0}']), ('\u{a725}', ['\u{a724}', '\u{0}', '\u{0}']),
+ ('\u{a727}', ['\u{a726}', '\u{0}', '\u{0}']), ('\u{a729}', ['\u{a728}', '\u{0}', '\u{0}']),
+ ('\u{a72b}', ['\u{a72a}', '\u{0}', '\u{0}']), ('\u{a72d}', ['\u{a72c}', '\u{0}', '\u{0}']),
+ ('\u{a72f}', ['\u{a72e}', '\u{0}', '\u{0}']), ('\u{a733}', ['\u{a732}', '\u{0}', '\u{0}']),
+ ('\u{a735}', ['\u{a734}', '\u{0}', '\u{0}']), ('\u{a737}', ['\u{a736}', '\u{0}', '\u{0}']),
+ ('\u{a739}', ['\u{a738}', '\u{0}', '\u{0}']), ('\u{a73b}', ['\u{a73a}', '\u{0}', '\u{0}']),
+ ('\u{a73d}', ['\u{a73c}', '\u{0}', '\u{0}']), ('\u{a73f}', ['\u{a73e}', '\u{0}', '\u{0}']),
+ ('\u{a741}', ['\u{a740}', '\u{0}', '\u{0}']), ('\u{a743}', ['\u{a742}', '\u{0}', '\u{0}']),
+ ('\u{a745}', ['\u{a744}', '\u{0}', '\u{0}']), ('\u{a747}', ['\u{a746}', '\u{0}', '\u{0}']),
+ ('\u{a749}', ['\u{a748}', '\u{0}', '\u{0}']), ('\u{a74b}', ['\u{a74a}', '\u{0}', '\u{0}']),
+ ('\u{a74d}', ['\u{a74c}', '\u{0}', '\u{0}']), ('\u{a74f}', ['\u{a74e}', '\u{0}', '\u{0}']),
+ ('\u{a751}', ['\u{a750}', '\u{0}', '\u{0}']), ('\u{a753}', ['\u{a752}', '\u{0}', '\u{0}']),
+ ('\u{a755}', ['\u{a754}', '\u{0}', '\u{0}']), ('\u{a757}', ['\u{a756}', '\u{0}', '\u{0}']),
+ ('\u{a759}', ['\u{a758}', '\u{0}', '\u{0}']), ('\u{a75b}', ['\u{a75a}', '\u{0}', '\u{0}']),
+ ('\u{a75d}', ['\u{a75c}', '\u{0}', '\u{0}']), ('\u{a75f}', ['\u{a75e}', '\u{0}', '\u{0}']),
+ ('\u{a761}', ['\u{a760}', '\u{0}', '\u{0}']), ('\u{a763}', ['\u{a762}', '\u{0}', '\u{0}']),
+ ('\u{a765}', ['\u{a764}', '\u{0}', '\u{0}']), ('\u{a767}', ['\u{a766}', '\u{0}', '\u{0}']),
+ ('\u{a769}', ['\u{a768}', '\u{0}', '\u{0}']), ('\u{a76b}', ['\u{a76a}', '\u{0}', '\u{0}']),
+ ('\u{a76d}', ['\u{a76c}', '\u{0}', '\u{0}']), ('\u{a76f}', ['\u{a76e}', '\u{0}', '\u{0}']),
+ ('\u{a77a}', ['\u{a779}', '\u{0}', '\u{0}']), ('\u{a77c}', ['\u{a77b}', '\u{0}', '\u{0}']),
+ ('\u{a77f}', ['\u{a77e}', '\u{0}', '\u{0}']), ('\u{a781}', ['\u{a780}', '\u{0}', '\u{0}']),
+ ('\u{a783}', ['\u{a782}', '\u{0}', '\u{0}']), ('\u{a785}', ['\u{a784}', '\u{0}', '\u{0}']),
+ ('\u{a787}', ['\u{a786}', '\u{0}', '\u{0}']), ('\u{a78c}', ['\u{a78b}', '\u{0}', '\u{0}']),
+ ('\u{a791}', ['\u{a790}', '\u{0}', '\u{0}']), ('\u{a793}', ['\u{a792}', '\u{0}', '\u{0}']),
+ ('\u{a794}', ['\u{a7c4}', '\u{0}', '\u{0}']), ('\u{a797}', ['\u{a796}', '\u{0}', '\u{0}']),
+ ('\u{a799}', ['\u{a798}', '\u{0}', '\u{0}']), ('\u{a79b}', ['\u{a79a}', '\u{0}', '\u{0}']),
+ ('\u{a79d}', ['\u{a79c}', '\u{0}', '\u{0}']), ('\u{a79f}', ['\u{a79e}', '\u{0}', '\u{0}']),
+ ('\u{a7a1}', ['\u{a7a0}', '\u{0}', '\u{0}']), ('\u{a7a3}', ['\u{a7a2}', '\u{0}', '\u{0}']),
+ ('\u{a7a5}', ['\u{a7a4}', '\u{0}', '\u{0}']), ('\u{a7a7}', ['\u{a7a6}', '\u{0}', '\u{0}']),
+ ('\u{a7a9}', ['\u{a7a8}', '\u{0}', '\u{0}']), ('\u{a7b5}', ['\u{a7b4}', '\u{0}', '\u{0}']),
+ ('\u{a7b7}', ['\u{a7b6}', '\u{0}', '\u{0}']), ('\u{a7b9}', ['\u{a7b8}', '\u{0}', '\u{0}']),
+ ('\u{a7bb}', ['\u{a7ba}', '\u{0}', '\u{0}']), ('\u{a7bd}', ['\u{a7bc}', '\u{0}', '\u{0}']),
+ ('\u{a7bf}', ['\u{a7be}', '\u{0}', '\u{0}']), ('\u{a7c3}', ['\u{a7c2}', '\u{0}', '\u{0}']),
+ ('\u{a7c8}', ['\u{a7c7}', '\u{0}', '\u{0}']), ('\u{a7ca}', ['\u{a7c9}', '\u{0}', '\u{0}']),
+ ('\u{a7f6}', ['\u{a7f5}', '\u{0}', '\u{0}']), ('\u{ab53}', ['\u{a7b3}', '\u{0}', '\u{0}']),
+ ('\u{ab70}', ['\u{13a0}', '\u{0}', '\u{0}']), ('\u{ab71}', ['\u{13a1}', '\u{0}', '\u{0}']),
+ ('\u{ab72}', ['\u{13a2}', '\u{0}', '\u{0}']), ('\u{ab73}', ['\u{13a3}', '\u{0}', '\u{0}']),
+ ('\u{ab74}', ['\u{13a4}', '\u{0}', '\u{0}']), ('\u{ab75}', ['\u{13a5}', '\u{0}', '\u{0}']),
+ ('\u{ab76}', ['\u{13a6}', '\u{0}', '\u{0}']), ('\u{ab77}', ['\u{13a7}', '\u{0}', '\u{0}']),
+ ('\u{ab78}', ['\u{13a8}', '\u{0}', '\u{0}']), ('\u{ab79}', ['\u{13a9}', '\u{0}', '\u{0}']),
+ ('\u{ab7a}', ['\u{13aa}', '\u{0}', '\u{0}']), ('\u{ab7b}', ['\u{13ab}', '\u{0}', '\u{0}']),
+ ('\u{ab7c}', ['\u{13ac}', '\u{0}', '\u{0}']), ('\u{ab7d}', ['\u{13ad}', '\u{0}', '\u{0}']),
+ ('\u{ab7e}', ['\u{13ae}', '\u{0}', '\u{0}']), ('\u{ab7f}', ['\u{13af}', '\u{0}', '\u{0}']),
+ ('\u{ab80}', ['\u{13b0}', '\u{0}', '\u{0}']), ('\u{ab81}', ['\u{13b1}', '\u{0}', '\u{0}']),
+ ('\u{ab82}', ['\u{13b2}', '\u{0}', '\u{0}']), ('\u{ab83}', ['\u{13b3}', '\u{0}', '\u{0}']),
+ ('\u{ab84}', ['\u{13b4}', '\u{0}', '\u{0}']), ('\u{ab85}', ['\u{13b5}', '\u{0}', '\u{0}']),
+ ('\u{ab86}', ['\u{13b6}', '\u{0}', '\u{0}']), ('\u{ab87}', ['\u{13b7}', '\u{0}', '\u{0}']),
+ ('\u{ab88}', ['\u{13b8}', '\u{0}', '\u{0}']), ('\u{ab89}', ['\u{13b9}', '\u{0}', '\u{0}']),
+ ('\u{ab8a}', ['\u{13ba}', '\u{0}', '\u{0}']), ('\u{ab8b}', ['\u{13bb}', '\u{0}', '\u{0}']),
+ ('\u{ab8c}', ['\u{13bc}', '\u{0}', '\u{0}']), ('\u{ab8d}', ['\u{13bd}', '\u{0}', '\u{0}']),
+ ('\u{ab8e}', ['\u{13be}', '\u{0}', '\u{0}']), ('\u{ab8f}', ['\u{13bf}', '\u{0}', '\u{0}']),
+ ('\u{ab90}', ['\u{13c0}', '\u{0}', '\u{0}']), ('\u{ab91}', ['\u{13c1}', '\u{0}', '\u{0}']),
+ ('\u{ab92}', ['\u{13c2}', '\u{0}', '\u{0}']), ('\u{ab93}', ['\u{13c3}', '\u{0}', '\u{0}']),
+ ('\u{ab94}', ['\u{13c4}', '\u{0}', '\u{0}']), ('\u{ab95}', ['\u{13c5}', '\u{0}', '\u{0}']),
+ ('\u{ab96}', ['\u{13c6}', '\u{0}', '\u{0}']), ('\u{ab97}', ['\u{13c7}', '\u{0}', '\u{0}']),
+ ('\u{ab98}', ['\u{13c8}', '\u{0}', '\u{0}']), ('\u{ab99}', ['\u{13c9}', '\u{0}', '\u{0}']),
+ ('\u{ab9a}', ['\u{13ca}', '\u{0}', '\u{0}']), ('\u{ab9b}', ['\u{13cb}', '\u{0}', '\u{0}']),
+ ('\u{ab9c}', ['\u{13cc}', '\u{0}', '\u{0}']), ('\u{ab9d}', ['\u{13cd}', '\u{0}', '\u{0}']),
+ ('\u{ab9e}', ['\u{13ce}', '\u{0}', '\u{0}']), ('\u{ab9f}', ['\u{13cf}', '\u{0}', '\u{0}']),
+ ('\u{aba0}', ['\u{13d0}', '\u{0}', '\u{0}']), ('\u{aba1}', ['\u{13d1}', '\u{0}', '\u{0}']),
+ ('\u{aba2}', ['\u{13d2}', '\u{0}', '\u{0}']), ('\u{aba3}', ['\u{13d3}', '\u{0}', '\u{0}']),
+ ('\u{aba4}', ['\u{13d4}', '\u{0}', '\u{0}']), ('\u{aba5}', ['\u{13d5}', '\u{0}', '\u{0}']),
+ ('\u{aba6}', ['\u{13d6}', '\u{0}', '\u{0}']), ('\u{aba7}', ['\u{13d7}', '\u{0}', '\u{0}']),
+ ('\u{aba8}', ['\u{13d8}', '\u{0}', '\u{0}']), ('\u{aba9}', ['\u{13d9}', '\u{0}', '\u{0}']),
+ ('\u{abaa}', ['\u{13da}', '\u{0}', '\u{0}']), ('\u{abab}', ['\u{13db}', '\u{0}', '\u{0}']),
+ ('\u{abac}', ['\u{13dc}', '\u{0}', '\u{0}']), ('\u{abad}', ['\u{13dd}', '\u{0}', '\u{0}']),
+ ('\u{abae}', ['\u{13de}', '\u{0}', '\u{0}']), ('\u{abaf}', ['\u{13df}', '\u{0}', '\u{0}']),
+ ('\u{abb0}', ['\u{13e0}', '\u{0}', '\u{0}']), ('\u{abb1}', ['\u{13e1}', '\u{0}', '\u{0}']),
+ ('\u{abb2}', ['\u{13e2}', '\u{0}', '\u{0}']), ('\u{abb3}', ['\u{13e3}', '\u{0}', '\u{0}']),
+ ('\u{abb4}', ['\u{13e4}', '\u{0}', '\u{0}']), ('\u{abb5}', ['\u{13e5}', '\u{0}', '\u{0}']),
+ ('\u{abb6}', ['\u{13e6}', '\u{0}', '\u{0}']), ('\u{abb7}', ['\u{13e7}', '\u{0}', '\u{0}']),
+ ('\u{abb8}', ['\u{13e8}', '\u{0}', '\u{0}']), ('\u{abb9}', ['\u{13e9}', '\u{0}', '\u{0}']),
+ ('\u{abba}', ['\u{13ea}', '\u{0}', '\u{0}']), ('\u{abbb}', ['\u{13eb}', '\u{0}', '\u{0}']),
+ ('\u{abbc}', ['\u{13ec}', '\u{0}', '\u{0}']), ('\u{abbd}', ['\u{13ed}', '\u{0}', '\u{0}']),
+ ('\u{abbe}', ['\u{13ee}', '\u{0}', '\u{0}']), ('\u{abbf}', ['\u{13ef}', '\u{0}', '\u{0}']),
+ ('\u{fb00}', ['F', 'F', '\u{0}']), ('\u{fb01}', ['F', 'I', '\u{0}']),
+ ('\u{fb02}', ['F', 'L', '\u{0}']), ('\u{fb03}', ['F', 'F', 'I']),
+ ('\u{fb04}', ['F', 'F', 'L']), ('\u{fb05}', ['S', 'T', '\u{0}']),
+ ('\u{fb06}', ['S', 'T', '\u{0}']), ('\u{fb13}', ['\u{544}', '\u{546}', '\u{0}']),
+ ('\u{fb14}', ['\u{544}', '\u{535}', '\u{0}']),
+ ('\u{fb15}', ['\u{544}', '\u{53b}', '\u{0}']),
+ ('\u{fb16}', ['\u{54e}', '\u{546}', '\u{0}']),
+ ('\u{fb17}', ['\u{544}', '\u{53d}', '\u{0}']), ('\u{ff41}', ['\u{ff21}', '\u{0}', '\u{0}']),
+ ('\u{ff42}', ['\u{ff22}', '\u{0}', '\u{0}']), ('\u{ff43}', ['\u{ff23}', '\u{0}', '\u{0}']),
+ ('\u{ff44}', ['\u{ff24}', '\u{0}', '\u{0}']), ('\u{ff45}', ['\u{ff25}', '\u{0}', '\u{0}']),
+ ('\u{ff46}', ['\u{ff26}', '\u{0}', '\u{0}']), ('\u{ff47}', ['\u{ff27}', '\u{0}', '\u{0}']),
+ ('\u{ff48}', ['\u{ff28}', '\u{0}', '\u{0}']), ('\u{ff49}', ['\u{ff29}', '\u{0}', '\u{0}']),
+ ('\u{ff4a}', ['\u{ff2a}', '\u{0}', '\u{0}']), ('\u{ff4b}', ['\u{ff2b}', '\u{0}', '\u{0}']),
+ ('\u{ff4c}', ['\u{ff2c}', '\u{0}', '\u{0}']), ('\u{ff4d}', ['\u{ff2d}', '\u{0}', '\u{0}']),
+ ('\u{ff4e}', ['\u{ff2e}', '\u{0}', '\u{0}']), ('\u{ff4f}', ['\u{ff2f}', '\u{0}', '\u{0}']),
+ ('\u{ff50}', ['\u{ff30}', '\u{0}', '\u{0}']), ('\u{ff51}', ['\u{ff31}', '\u{0}', '\u{0}']),
+ ('\u{ff52}', ['\u{ff32}', '\u{0}', '\u{0}']), ('\u{ff53}', ['\u{ff33}', '\u{0}', '\u{0}']),
+ ('\u{ff54}', ['\u{ff34}', '\u{0}', '\u{0}']), ('\u{ff55}', ['\u{ff35}', '\u{0}', '\u{0}']),
+ ('\u{ff56}', ['\u{ff36}', '\u{0}', '\u{0}']), ('\u{ff57}', ['\u{ff37}', '\u{0}', '\u{0}']),
+ ('\u{ff58}', ['\u{ff38}', '\u{0}', '\u{0}']), ('\u{ff59}', ['\u{ff39}', '\u{0}', '\u{0}']),
+ ('\u{ff5a}', ['\u{ff3a}', '\u{0}', '\u{0}']),
+ ('\u{10428}', ['\u{10400}', '\u{0}', '\u{0}']),
+ ('\u{10429}', ['\u{10401}', '\u{0}', '\u{0}']),
+ ('\u{1042a}', ['\u{10402}', '\u{0}', '\u{0}']),
+ ('\u{1042b}', ['\u{10403}', '\u{0}', '\u{0}']),
+ ('\u{1042c}', ['\u{10404}', '\u{0}', '\u{0}']),
+ ('\u{1042d}', ['\u{10405}', '\u{0}', '\u{0}']),
+ ('\u{1042e}', ['\u{10406}', '\u{0}', '\u{0}']),
+ ('\u{1042f}', ['\u{10407}', '\u{0}', '\u{0}']),
+ ('\u{10430}', ['\u{10408}', '\u{0}', '\u{0}']),
+ ('\u{10431}', ['\u{10409}', '\u{0}', '\u{0}']),
+ ('\u{10432}', ['\u{1040a}', '\u{0}', '\u{0}']),
+ ('\u{10433}', ['\u{1040b}', '\u{0}', '\u{0}']),
+ ('\u{10434}', ['\u{1040c}', '\u{0}', '\u{0}']),
+ ('\u{10435}', ['\u{1040d}', '\u{0}', '\u{0}']),
+ ('\u{10436}', ['\u{1040e}', '\u{0}', '\u{0}']),
+ ('\u{10437}', ['\u{1040f}', '\u{0}', '\u{0}']),
+ ('\u{10438}', ['\u{10410}', '\u{0}', '\u{0}']),
+ ('\u{10439}', ['\u{10411}', '\u{0}', '\u{0}']),
+ ('\u{1043a}', ['\u{10412}', '\u{0}', '\u{0}']),
+ ('\u{1043b}', ['\u{10413}', '\u{0}', '\u{0}']),
+ ('\u{1043c}', ['\u{10414}', '\u{0}', '\u{0}']),
+ ('\u{1043d}', ['\u{10415}', '\u{0}', '\u{0}']),
+ ('\u{1043e}', ['\u{10416}', '\u{0}', '\u{0}']),
+ ('\u{1043f}', ['\u{10417}', '\u{0}', '\u{0}']),
+ ('\u{10440}', ['\u{10418}', '\u{0}', '\u{0}']),
+ ('\u{10441}', ['\u{10419}', '\u{0}', '\u{0}']),
+ ('\u{10442}', ['\u{1041a}', '\u{0}', '\u{0}']),
+ ('\u{10443}', ['\u{1041b}', '\u{0}', '\u{0}']),
+ ('\u{10444}', ['\u{1041c}', '\u{0}', '\u{0}']),
+ ('\u{10445}', ['\u{1041d}', '\u{0}', '\u{0}']),
+ ('\u{10446}', ['\u{1041e}', '\u{0}', '\u{0}']),
+ ('\u{10447}', ['\u{1041f}', '\u{0}', '\u{0}']),
+ ('\u{10448}', ['\u{10420}', '\u{0}', '\u{0}']),
+ ('\u{10449}', ['\u{10421}', '\u{0}', '\u{0}']),
+ ('\u{1044a}', ['\u{10422}', '\u{0}', '\u{0}']),
+ ('\u{1044b}', ['\u{10423}', '\u{0}', '\u{0}']),
+ ('\u{1044c}', ['\u{10424}', '\u{0}', '\u{0}']),
+ ('\u{1044d}', ['\u{10425}', '\u{0}', '\u{0}']),
+ ('\u{1044e}', ['\u{10426}', '\u{0}', '\u{0}']),
+ ('\u{1044f}', ['\u{10427}', '\u{0}', '\u{0}']),
+ ('\u{104d8}', ['\u{104b0}', '\u{0}', '\u{0}']),
+ ('\u{104d9}', ['\u{104b1}', '\u{0}', '\u{0}']),
+ ('\u{104da}', ['\u{104b2}', '\u{0}', '\u{0}']),
+ ('\u{104db}', ['\u{104b3}', '\u{0}', '\u{0}']),
+ ('\u{104dc}', ['\u{104b4}', '\u{0}', '\u{0}']),
+ ('\u{104dd}', ['\u{104b5}', '\u{0}', '\u{0}']),
+ ('\u{104de}', ['\u{104b6}', '\u{0}', '\u{0}']),
+ ('\u{104df}', ['\u{104b7}', '\u{0}', '\u{0}']),
+ ('\u{104e0}', ['\u{104b8}', '\u{0}', '\u{0}']),
+ ('\u{104e1}', ['\u{104b9}', '\u{0}', '\u{0}']),
+ ('\u{104e2}', ['\u{104ba}', '\u{0}', '\u{0}']),
+ ('\u{104e3}', ['\u{104bb}', '\u{0}', '\u{0}']),
+ ('\u{104e4}', ['\u{104bc}', '\u{0}', '\u{0}']),
+ ('\u{104e5}', ['\u{104bd}', '\u{0}', '\u{0}']),
+ ('\u{104e6}', ['\u{104be}', '\u{0}', '\u{0}']),
+ ('\u{104e7}', ['\u{104bf}', '\u{0}', '\u{0}']),
+ ('\u{104e8}', ['\u{104c0}', '\u{0}', '\u{0}']),
+ ('\u{104e9}', ['\u{104c1}', '\u{0}', '\u{0}']),
+ ('\u{104ea}', ['\u{104c2}', '\u{0}', '\u{0}']),
+ ('\u{104eb}', ['\u{104c3}', '\u{0}', '\u{0}']),
+ ('\u{104ec}', ['\u{104c4}', '\u{0}', '\u{0}']),
+ ('\u{104ed}', ['\u{104c5}', '\u{0}', '\u{0}']),
+ ('\u{104ee}', ['\u{104c6}', '\u{0}', '\u{0}']),
+ ('\u{104ef}', ['\u{104c7}', '\u{0}', '\u{0}']),
+ ('\u{104f0}', ['\u{104c8}', '\u{0}', '\u{0}']),
+ ('\u{104f1}', ['\u{104c9}', '\u{0}', '\u{0}']),
+ ('\u{104f2}', ['\u{104ca}', '\u{0}', '\u{0}']),
+ ('\u{104f3}', ['\u{104cb}', '\u{0}', '\u{0}']),
+ ('\u{104f4}', ['\u{104cc}', '\u{0}', '\u{0}']),
+ ('\u{104f5}', ['\u{104cd}', '\u{0}', '\u{0}']),
+ ('\u{104f6}', ['\u{104ce}', '\u{0}', '\u{0}']),
+ ('\u{104f7}', ['\u{104cf}', '\u{0}', '\u{0}']),
+ ('\u{104f8}', ['\u{104d0}', '\u{0}', '\u{0}']),
+ ('\u{104f9}', ['\u{104d1}', '\u{0}', '\u{0}']),
+ ('\u{104fa}', ['\u{104d2}', '\u{0}', '\u{0}']),
+ ('\u{104fb}', ['\u{104d3}', '\u{0}', '\u{0}']),
+ ('\u{10cc0}', ['\u{10c80}', '\u{0}', '\u{0}']),
+ ('\u{10cc1}', ['\u{10c81}', '\u{0}', '\u{0}']),
+ ('\u{10cc2}', ['\u{10c82}', '\u{0}', '\u{0}']),
+ ('\u{10cc3}', ['\u{10c83}', '\u{0}', '\u{0}']),
+ ('\u{10cc4}', ['\u{10c84}', '\u{0}', '\u{0}']),
+ ('\u{10cc5}', ['\u{10c85}', '\u{0}', '\u{0}']),
+ ('\u{10cc6}', ['\u{10c86}', '\u{0}', '\u{0}']),
+ ('\u{10cc7}', ['\u{10c87}', '\u{0}', '\u{0}']),
+ ('\u{10cc8}', ['\u{10c88}', '\u{0}', '\u{0}']),
+ ('\u{10cc9}', ['\u{10c89}', '\u{0}', '\u{0}']),
+ ('\u{10cca}', ['\u{10c8a}', '\u{0}', '\u{0}']),
+ ('\u{10ccb}', ['\u{10c8b}', '\u{0}', '\u{0}']),
+ ('\u{10ccc}', ['\u{10c8c}', '\u{0}', '\u{0}']),
+ ('\u{10ccd}', ['\u{10c8d}', '\u{0}', '\u{0}']),
+ ('\u{10cce}', ['\u{10c8e}', '\u{0}', '\u{0}']),
+ ('\u{10ccf}', ['\u{10c8f}', '\u{0}', '\u{0}']),
+ ('\u{10cd0}', ['\u{10c90}', '\u{0}', '\u{0}']),
+ ('\u{10cd1}', ['\u{10c91}', '\u{0}', '\u{0}']),
+ ('\u{10cd2}', ['\u{10c92}', '\u{0}', '\u{0}']),
+ ('\u{10cd3}', ['\u{10c93}', '\u{0}', '\u{0}']),
+ ('\u{10cd4}', ['\u{10c94}', '\u{0}', '\u{0}']),
+ ('\u{10cd5}', ['\u{10c95}', '\u{0}', '\u{0}']),
+ ('\u{10cd6}', ['\u{10c96}', '\u{0}', '\u{0}']),
+ ('\u{10cd7}', ['\u{10c97}', '\u{0}', '\u{0}']),
+ ('\u{10cd8}', ['\u{10c98}', '\u{0}', '\u{0}']),
+ ('\u{10cd9}', ['\u{10c99}', '\u{0}', '\u{0}']),
+ ('\u{10cda}', ['\u{10c9a}', '\u{0}', '\u{0}']),
+ ('\u{10cdb}', ['\u{10c9b}', '\u{0}', '\u{0}']),
+ ('\u{10cdc}', ['\u{10c9c}', '\u{0}', '\u{0}']),
+ ('\u{10cdd}', ['\u{10c9d}', '\u{0}', '\u{0}']),
+ ('\u{10cde}', ['\u{10c9e}', '\u{0}', '\u{0}']),
+ ('\u{10cdf}', ['\u{10c9f}', '\u{0}', '\u{0}']),
+ ('\u{10ce0}', ['\u{10ca0}', '\u{0}', '\u{0}']),
+ ('\u{10ce1}', ['\u{10ca1}', '\u{0}', '\u{0}']),
+ ('\u{10ce2}', ['\u{10ca2}', '\u{0}', '\u{0}']),
+ ('\u{10ce3}', ['\u{10ca3}', '\u{0}', '\u{0}']),
+ ('\u{10ce4}', ['\u{10ca4}', '\u{0}', '\u{0}']),
+ ('\u{10ce5}', ['\u{10ca5}', '\u{0}', '\u{0}']),
+ ('\u{10ce6}', ['\u{10ca6}', '\u{0}', '\u{0}']),
+ ('\u{10ce7}', ['\u{10ca7}', '\u{0}', '\u{0}']),
+ ('\u{10ce8}', ['\u{10ca8}', '\u{0}', '\u{0}']),
+ ('\u{10ce9}', ['\u{10ca9}', '\u{0}', '\u{0}']),
+ ('\u{10cea}', ['\u{10caa}', '\u{0}', '\u{0}']),
+ ('\u{10ceb}', ['\u{10cab}', '\u{0}', '\u{0}']),
+ ('\u{10cec}', ['\u{10cac}', '\u{0}', '\u{0}']),
+ ('\u{10ced}', ['\u{10cad}', '\u{0}', '\u{0}']),
+ ('\u{10cee}', ['\u{10cae}', '\u{0}', '\u{0}']),
+ ('\u{10cef}', ['\u{10caf}', '\u{0}', '\u{0}']),
+ ('\u{10cf0}', ['\u{10cb0}', '\u{0}', '\u{0}']),
+ ('\u{10cf1}', ['\u{10cb1}', '\u{0}', '\u{0}']),
+ ('\u{10cf2}', ['\u{10cb2}', '\u{0}', '\u{0}']),
+ ('\u{118c0}', ['\u{118a0}', '\u{0}', '\u{0}']),
+ ('\u{118c1}', ['\u{118a1}', '\u{0}', '\u{0}']),
+ ('\u{118c2}', ['\u{118a2}', '\u{0}', '\u{0}']),
+ ('\u{118c3}', ['\u{118a3}', '\u{0}', '\u{0}']),
+ ('\u{118c4}', ['\u{118a4}', '\u{0}', '\u{0}']),
+ ('\u{118c5}', ['\u{118a5}', '\u{0}', '\u{0}']),
+ ('\u{118c6}', ['\u{118a6}', '\u{0}', '\u{0}']),
+ ('\u{118c7}', ['\u{118a7}', '\u{0}', '\u{0}']),
+ ('\u{118c8}', ['\u{118a8}', '\u{0}', '\u{0}']),
+ ('\u{118c9}', ['\u{118a9}', '\u{0}', '\u{0}']),
+ ('\u{118ca}', ['\u{118aa}', '\u{0}', '\u{0}']),
+ ('\u{118cb}', ['\u{118ab}', '\u{0}', '\u{0}']),
+ ('\u{118cc}', ['\u{118ac}', '\u{0}', '\u{0}']),
+ ('\u{118cd}', ['\u{118ad}', '\u{0}', '\u{0}']),
+ ('\u{118ce}', ['\u{118ae}', '\u{0}', '\u{0}']),
+ ('\u{118cf}', ['\u{118af}', '\u{0}', '\u{0}']),
+ ('\u{118d0}', ['\u{118b0}', '\u{0}', '\u{0}']),
+ ('\u{118d1}', ['\u{118b1}', '\u{0}', '\u{0}']),
+ ('\u{118d2}', ['\u{118b2}', '\u{0}', '\u{0}']),
+ ('\u{118d3}', ['\u{118b3}', '\u{0}', '\u{0}']),
+ ('\u{118d4}', ['\u{118b4}', '\u{0}', '\u{0}']),
+ ('\u{118d5}', ['\u{118b5}', '\u{0}', '\u{0}']),
+ ('\u{118d6}', ['\u{118b6}', '\u{0}', '\u{0}']),
+ ('\u{118d7}', ['\u{118b7}', '\u{0}', '\u{0}']),
+ ('\u{118d8}', ['\u{118b8}', '\u{0}', '\u{0}']),
+ ('\u{118d9}', ['\u{118b9}', '\u{0}', '\u{0}']),
+ ('\u{118da}', ['\u{118ba}', '\u{0}', '\u{0}']),
+ ('\u{118db}', ['\u{118bb}', '\u{0}', '\u{0}']),
+ ('\u{118dc}', ['\u{118bc}', '\u{0}', '\u{0}']),
+ ('\u{118dd}', ['\u{118bd}', '\u{0}', '\u{0}']),
+ ('\u{118de}', ['\u{118be}', '\u{0}', '\u{0}']),
+ ('\u{118df}', ['\u{118bf}', '\u{0}', '\u{0}']),
+ ('\u{16e60}', ['\u{16e40}', '\u{0}', '\u{0}']),
+ ('\u{16e61}', ['\u{16e41}', '\u{0}', '\u{0}']),
+ ('\u{16e62}', ['\u{16e42}', '\u{0}', '\u{0}']),
+ ('\u{16e63}', ['\u{16e43}', '\u{0}', '\u{0}']),
+ ('\u{16e64}', ['\u{16e44}', '\u{0}', '\u{0}']),
+ ('\u{16e65}', ['\u{16e45}', '\u{0}', '\u{0}']),
+ ('\u{16e66}', ['\u{16e46}', '\u{0}', '\u{0}']),
+ ('\u{16e67}', ['\u{16e47}', '\u{0}', '\u{0}']),
+ ('\u{16e68}', ['\u{16e48}', '\u{0}', '\u{0}']),
+ ('\u{16e69}', ['\u{16e49}', '\u{0}', '\u{0}']),
+ ('\u{16e6a}', ['\u{16e4a}', '\u{0}', '\u{0}']),
+ ('\u{16e6b}', ['\u{16e4b}', '\u{0}', '\u{0}']),
+ ('\u{16e6c}', ['\u{16e4c}', '\u{0}', '\u{0}']),
+ ('\u{16e6d}', ['\u{16e4d}', '\u{0}', '\u{0}']),
+ ('\u{16e6e}', ['\u{16e4e}', '\u{0}', '\u{0}']),
+ ('\u{16e6f}', ['\u{16e4f}', '\u{0}', '\u{0}']),
+ ('\u{16e70}', ['\u{16e50}', '\u{0}', '\u{0}']),
+ ('\u{16e71}', ['\u{16e51}', '\u{0}', '\u{0}']),
+ ('\u{16e72}', ['\u{16e52}', '\u{0}', '\u{0}']),
+ ('\u{16e73}', ['\u{16e53}', '\u{0}', '\u{0}']),
+ ('\u{16e74}', ['\u{16e54}', '\u{0}', '\u{0}']),
+ ('\u{16e75}', ['\u{16e55}', '\u{0}', '\u{0}']),
+ ('\u{16e76}', ['\u{16e56}', '\u{0}', '\u{0}']),
+ ('\u{16e77}', ['\u{16e57}', '\u{0}', '\u{0}']),
+ ('\u{16e78}', ['\u{16e58}', '\u{0}', '\u{0}']),
+ ('\u{16e79}', ['\u{16e59}', '\u{0}', '\u{0}']),
+ ('\u{16e7a}', ['\u{16e5a}', '\u{0}', '\u{0}']),
+ ('\u{16e7b}', ['\u{16e5b}', '\u{0}', '\u{0}']),
+ ('\u{16e7c}', ['\u{16e5c}', '\u{0}', '\u{0}']),
+ ('\u{16e7d}', ['\u{16e5d}', '\u{0}', '\u{0}']),
+ ('\u{16e7e}', ['\u{16e5e}', '\u{0}', '\u{0}']),
+ ('\u{16e7f}', ['\u{16e5f}', '\u{0}', '\u{0}']),
+ ('\u{1e922}', ['\u{1e900}', '\u{0}', '\u{0}']),
+ ('\u{1e923}', ['\u{1e901}', '\u{0}', '\u{0}']),
+ ('\u{1e924}', ['\u{1e902}', '\u{0}', '\u{0}']),
+ ('\u{1e925}', ['\u{1e903}', '\u{0}', '\u{0}']),
+ ('\u{1e926}', ['\u{1e904}', '\u{0}', '\u{0}']),
+ ('\u{1e927}', ['\u{1e905}', '\u{0}', '\u{0}']),
+ ('\u{1e928}', ['\u{1e906}', '\u{0}', '\u{0}']),
+ ('\u{1e929}', ['\u{1e907}', '\u{0}', '\u{0}']),
+ ('\u{1e92a}', ['\u{1e908}', '\u{0}', '\u{0}']),
+ ('\u{1e92b}', ['\u{1e909}', '\u{0}', '\u{0}']),
+ ('\u{1e92c}', ['\u{1e90a}', '\u{0}', '\u{0}']),
+ ('\u{1e92d}', ['\u{1e90b}', '\u{0}', '\u{0}']),
+ ('\u{1e92e}', ['\u{1e90c}', '\u{0}', '\u{0}']),
+ ('\u{1e92f}', ['\u{1e90d}', '\u{0}', '\u{0}']),
+ ('\u{1e930}', ['\u{1e90e}', '\u{0}', '\u{0}']),
+ ('\u{1e931}', ['\u{1e90f}', '\u{0}', '\u{0}']),
+ ('\u{1e932}', ['\u{1e910}', '\u{0}', '\u{0}']),
+ ('\u{1e933}', ['\u{1e911}', '\u{0}', '\u{0}']),
+ ('\u{1e934}', ['\u{1e912}', '\u{0}', '\u{0}']),
+ ('\u{1e935}', ['\u{1e913}', '\u{0}', '\u{0}']),
+ ('\u{1e936}', ['\u{1e914}', '\u{0}', '\u{0}']),
+ ('\u{1e937}', ['\u{1e915}', '\u{0}', '\u{0}']),
+ ('\u{1e938}', ['\u{1e916}', '\u{0}', '\u{0}']),
+ ('\u{1e939}', ['\u{1e917}', '\u{0}', '\u{0}']),
+ ('\u{1e93a}', ['\u{1e918}', '\u{0}', '\u{0}']),
+ ('\u{1e93b}', ['\u{1e919}', '\u{0}', '\u{0}']),
+ ('\u{1e93c}', ['\u{1e91a}', '\u{0}', '\u{0}']),
+ ('\u{1e93d}', ['\u{1e91b}', '\u{0}', '\u{0}']),
+ ('\u{1e93e}', ['\u{1e91c}', '\u{0}', '\u{0}']),
+ ('\u{1e93f}', ['\u{1e91d}', '\u{0}', '\u{0}']),
+ ('\u{1e940}', ['\u{1e91e}', '\u{0}', '\u{0}']),
+ ('\u{1e941}', ['\u{1e91f}', '\u{0}', '\u{0}']),
+ ('\u{1e942}', ['\u{1e920}', '\u{0}', '\u{0}']),
+ ('\u{1e943}', ['\u{1e921}', '\u{0}', '\u{0}']),
+ ];
+}
--- /dev/null
+use crate::iter::FromIterator;
+
+/// Collapses all unit items from an iterator into one.
+///
+/// This is more useful when combined with higher-level abstractions, like
+/// collecting to a `Result<(), E>` where you only care about errors:
+///
+/// ```
+/// use std::io::*;
+/// let data = vec![1, 2, 3, 4, 5];
+/// let res: Result<()> = data.iter()
+/// .map(|x| writeln!(stdout(), "{}", x))
+/// .collect();
+/// assert!(res.is_ok());
+/// ```
+#[stable(feature = "unit_from_iter", since = "1.23.0")]
+impl FromIterator<()> for () {
+ fn from_iter<I: IntoIterator<Item = ()>>(iter: I) -> Self {
+ iter.into_iter().for_each(|()| {})
+ }
+}
--- /dev/null
+use core::alloc::Layout;
+use core::ptr::NonNull;
+
// Checks that `Layout::from_size_align_unchecked` and the (unstable)
// `Layout::dangling` are usable in a `const` context.
#[test]
fn const_unchecked_layout() {
    const SIZE: usize = 0x2000;
    const ALIGN: usize = 0x1000;
    // SAFETY: 0x1000 is a nonzero power of two, and 0x2000 rounded up to the
    // alignment does not overflow `isize`.
    const LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked(SIZE, ALIGN) };
    // A dangling pointer for this layout is non-null and well-aligned; the
    // final assertion pins its address to exactly `ALIGN`.
    const DANGLING: NonNull<u8> = LAYOUT.dangling();
    assert_eq!(LAYOUT.size(), SIZE);
    assert_eq!(LAYOUT.align(), ALIGN);
    assert_eq!(Some(DANGLING), NonNull::new(ALIGN as *mut u8));
}
--- /dev/null
+use core::any::*;
+
// Zero-sized marker type: a distinct concrete type for the `Any` tests to
// probe against.
#[derive(PartialEq, Debug)]
struct Test;

// A `&'static str` value whose *type* (not contents) is what the tests check.
static TEST: &'static str = "Test";
+
// `Any::is` through shared references must match only the exact erased type.
#[test]
fn any_referenced() {
    // Type-erase three values of distinct concrete types.
    let int_any: &dyn Any = &5;
    let str_any: &dyn Any = &TEST;
    let unit_any: &dyn Any = &Test;

    assert!(int_any.is::<i32>());
    assert!(!str_any.is::<i32>());
    assert!(!unit_any.is::<i32>());

    assert!(!int_any.is::<&'static str>());
    assert!(str_any.is::<&'static str>());
    assert!(!unit_any.is::<&'static str>());

    assert!(!int_any.is::<Test>());
    assert!(!str_any.is::<Test>());
    assert!(unit_any.is::<Test>());
}
+
// Same as `any_referenced`, but through owned `Box<dyn Any>` values.
#[test]
fn any_owning() {
    // `box` placement syntax was removed from the language; `Box::new` is the
    // supported spelling and behaves identically here.
    let (a, b, c) = (
        Box::new(5_usize) as Box<dyn Any>,
        Box::new(TEST) as Box<dyn Any>,
        Box::new(Test) as Box<dyn Any>,
    );

    assert!(a.is::<usize>());
    assert!(!b.is::<usize>());
    assert!(!c.is::<usize>());

    assert!(!a.is::<&'static str>());
    assert!(b.is::<&'static str>());
    assert!(!c.is::<&'static str>());

    assert!(!a.is::<Test>());
    assert!(!b.is::<Test>());
    assert!(c.is::<Test>());
}
+
// `downcast_ref` returns the value for the right type and `None` otherwise.
#[test]
fn any_downcast_ref() {
    let erased = &5_usize as &dyn Any;

    // Correct type: the borrowed value comes back.
    match erased.downcast_ref::<usize>() {
        Some(&5) => {}
        other => panic!("Unexpected value {:?}", other),
    }

    // Wrong type: `None`, not a panic or a bogus reference.
    match erased.downcast_ref::<Test>() {
        None => {}
        other => panic!("Unexpected value {:?}", other),
    }
}
+
// `downcast_mut`: successful downcasts expose the value mutably, failed ones
// return `None` and leave the value untouched.
#[test]
fn any_downcast_mut() {
    // `box` placement syntax was removed from the language; use `Box::new`.
    let mut a = 5_usize;
    let mut b: Box<_> = Box::new(7_usize);

    let a_r = &mut a as &mut dyn Any;
    let tmp: &mut usize = &mut *b;
    let b_r = tmp as &mut dyn Any;

    // Write through a successful downcast of the stack value.
    match a_r.downcast_mut::<usize>() {
        Some(x) => {
            assert_eq!(*x, 5);
            *x = 612;
        }
        x => panic!("Unexpected value {:?}", x),
    }

    // ...and of the boxed value.
    match b_r.downcast_mut::<usize>() {
        Some(x) => {
            assert_eq!(*x, 7);
            *x = 413;
        }
        x => panic!("Unexpected value {:?}", x),
    }

    // Downcasting to the wrong type must fail cleanly.
    match a_r.downcast_mut::<Test>() {
        None => (),
        x => panic!("Unexpected value {:?}", x),
    }

    match b_r.downcast_mut::<Test>() {
        None => (),
        x => panic!("Unexpected value {:?}", x),
    }

    // The earlier writes are visible through a fresh downcast.
    match a_r.downcast_mut::<usize>() {
        Some(&mut 612) => {}
        x => panic!("Unexpected value {:?}", x),
    }

    match b_r.downcast_mut::<usize>() {
        Some(&mut 413) => {}
        x => panic!("Unexpected value {:?}", x),
    }
}
+
// The length of an array is part of its type identity under `Any`.
#[test]
fn any_fixed_vec() {
    let arr = [0_usize; 8];
    let erased: &dyn Any = &arr;
    assert!(erased.is::<[usize; 8]>());
    // Same element type but different length must not match.
    assert!(!erased.is::<[usize; 10]>());
}
+
// `Any` is implemented for unsized types too; this only needs to compile.
#[test]
fn any_unsized() {
    fn assert_impls_any<T: Any + ?Sized>() {}
    assert_impls_any::<[i32]>();
}
--- /dev/null
+use core::array::{self, FixedSizeArray, IntoIter};
+use core::convert::TryFrom;
+
// Exercises the unstable `FixedSizeArray` trait (since removed in favor of
// const generics): its `as_slice`/`as_mut_slice` views must report the
// array's length for regular and zero-sized element types, including
// zero-length arrays.
#[test]
fn fixed_size_array() {
    let mut array = [0; 64];
    let mut zero_sized = [(); 64];
    // Length-0 arrays still yield valid (empty) slices.
    let mut empty_array = [0; 0];
    let mut empty_zero_sized = [(); 0];

    assert_eq!(FixedSizeArray::as_slice(&array).len(), 64);
    assert_eq!(FixedSizeArray::as_slice(&zero_sized).len(), 64);
    assert_eq!(FixedSizeArray::as_slice(&empty_array).len(), 0);
    assert_eq!(FixedSizeArray::as_slice(&empty_zero_sized).len(), 0);

    assert_eq!(FixedSizeArray::as_mut_slice(&mut array).len(), 64);
    assert_eq!(FixedSizeArray::as_mut_slice(&mut zero_sized).len(), 64);
    assert_eq!(FixedSizeArray::as_mut_slice(&mut empty_array).len(), 0);
    assert_eq!(FixedSizeArray::as_mut_slice(&mut empty_zero_sized).len(), 0);
}
+
// `array::from_ref` views a single value as a one-element array borrow.
#[test]
fn array_from_ref() {
    let s = String::from("Hello World!");
    let singleton: &[String; 1] = array::from_ref(&s);
    assert_eq!(singleton, &[s.clone()]);
}
+
// `array::from_mut` gives a mutable one-element array view; writes through
// the view must be visible in the original value.
#[test]
fn array_from_mut() {
    let mut s = String::from("Hello World");
    let singleton: &mut [String; 1] = array::from_mut(&mut s);
    singleton[0].push_str("!");
    assert_eq!(&s, "Hello World!");
}
+
// Round-trips `[u8; N]` -> `&[u8]` -> `&[u8; N]` via `TryFrom` for each
// length the original fixed-size impls covered (0..=32).
#[test]
fn array_try_from() {
    macro_rules! check_len {
        ($($len:tt)+) => {
            $({
                let arr = [0u8; $len];
                let as_slice: &[u8] = &arr[..];
                let back = <&[u8; $len]>::try_from(as_slice);
                assert_eq!(back.unwrap(), &arr);
            })+
        }
    }
    check_len! {
        0 1 2 3 4 5 6 7 8 9
        10 11 12 13 14 15 16 17 18 19
        20 21 22 23 24 25 26 27 28 29
        30 31 32
    }
}
+
// Collecting `array::IntoIter` yields the elements in order.
#[test]
fn iterator_collect() {
    let arr = [0, 1, 2, 5, 9];
    // Arrays of `Copy` elements are themselves `Copy`; the original's
    // `arr.clone()` was redundant.
    let v: Vec<_> = IntoIter::new(arr).collect();
    assert_eq!(&arr[..], &v[..]);
}
+
// Reversed collection yields the elements back-to-front.
#[test]
fn iterator_rev_collect() {
    let arr = [0, 1, 2, 5, 9];
    // `Copy` array: no `clone()` needed.
    let v: Vec<_> = IntoIter::new(arr).rev().collect();
    assert_eq!(&v[..], &[9, 5, 2, 1, 0]);
}
+
// `nth(i)` consumes elements up to and including index `i`.
#[test]
fn iterator_nth() {
    let v = [0, 1, 2, 3, 4];
    // `Copy` array: the original's `v.clone()` calls were redundant.
    for i in 0..v.len() {
        assert_eq!(IntoIter::new(v).nth(i).unwrap(), v[i]);
    }
    // Asking past the end yields `None`.
    assert_eq!(IntoIter::new(v).nth(v.len()), None);

    // Consecutive `nth` calls keep consuming from where the last stopped.
    let mut iter = IntoIter::new(v);
    assert_eq!(iter.nth(2).unwrap(), v[2]);
    assert_eq!(iter.nth(1).unwrap(), v[4]);
}
+
// `last` returns the final remaining element.
#[test]
fn iterator_last() {
    assert_eq!(IntoIter::new([0, 1, 2, 3, 4]).last(), Some(4));
    assert_eq!(IntoIter::new([0]).last(), Some(0));

    // After consuming from the back, `last` sees only what remains.
    let mut it = IntoIter::new([0, 9, 2, 4]);
    assert_eq!(it.next_back(), Some(4));
    assert_eq!(it.last(), Some(2));
}
+
// Cloning a partially-consumed iterator copies exactly the remaining items.
#[test]
fn iterator_clone() {
    let mut original = IntoIter::new([0, 2, 4, 6, 8]);
    assert_eq!(original.next(), Some(0));
    assert_eq!(original.next_back(), Some(8));

    // The clone advances independently of the original.
    let mut copy = original.clone();
    assert_eq!(original.next_back(), Some(6));
    assert_eq!(copy.next_back(), Some(6));
    assert_eq!(original.next_back(), Some(4));
    assert_eq!(copy.next_back(), Some(4));
    assert_eq!(original.next(), Some(2));
    assert_eq!(copy.next(), Some(2));
}
+
// Once exhausted, the iterator keeps yielding `None` (fused behavior).
#[test]
fn iterator_fused() {
    let mut it = IntoIter::new([0, 9, 2]);
    for &expected in &[0, 9, 2] {
        assert_eq!(it.next(), Some(expected));
    }
    for _ in 0..5 {
        assert_eq!(it.next(), None);
    }
}
+
// Checks `size_hint`/`len`/`is_empty` bookkeeping as elements are consumed
// from both ends. NOTE(review): `ExactSizeIterator::is_empty` is unstable
// (`exact_size_is_empty`), so this block builds only with that feature.
#[test]
fn iterator_len() {
    let mut it = IntoIter::new([0, 1, 2, 5, 9]);
    assert_eq!(it.size_hint(), (5, Some(5)));
    assert_eq!(it.len(), 5);
    assert_eq!(it.is_empty(), false);

    // Consuming from the front shrinks the reported length...
    assert_eq!(it.next(), Some(0));
    assert_eq!(it.size_hint(), (4, Some(4)));
    assert_eq!(it.len(), 4);
    assert_eq!(it.is_empty(), false);

    // ...and so does consuming from the back.
    assert_eq!(it.next_back(), Some(9));
    assert_eq!(it.size_hint(), (3, Some(3)));
    assert_eq!(it.len(), 3);
    assert_eq!(it.is_empty(), false);

    // Empty
    let it = IntoIter::new([] as [String; 0]);
    assert_eq!(it.size_hint(), (0, Some(0)));
    assert_eq!(it.len(), 0);
    assert_eq!(it.is_empty(), true);
}
+
// `count` reports the number of remaining (not yet consumed) elements.
#[test]
fn iterator_count() {
    let v = [0, 1, 2, 3, 4];
    // `Copy` array: the original's `v.clone()` was redundant.
    assert_eq!(IntoIter::new(v).count(), 5);

    let mut iter2 = IntoIter::new(v);
    iter2.next();
    iter2.next();
    assert_eq!(iter2.count(), 3);
}
+
// `flat_map` over two-element array iterators reproduces a contiguous range.
#[test]
fn iterator_flat_map() {
    let doubled: Vec<_> = (0..5).flat_map(|i| IntoIter::new([2 * i, 2 * i + 1])).collect();
    assert_eq!(doubled, (0..10).collect::<Vec<_>>());
}
+
// `Debug` for `array::IntoIter` shows the remaining elements.
#[test]
fn iterator_debug() {
    let values = [0, 1, 2, 5, 9];
    let rendered = format!("{:?}", IntoIter::new(values));
    assert_eq!(rendered, "IntoIter([0, 1, 2, 5, 9])");
}
+
// Verifies that `array::IntoIter` drops exactly the elements that were not
// yielded, in every consumption pattern (drop, next, next_back, clone, nth,
// count, full iteration). The drop counts are order-sensitive, so the code
// is left exactly as-is.
#[test]
fn iterator_drops() {
    use core::cell::Cell;

    // This test makes sure the correct number of elements are dropped. The `R`
    // type is just a reference to a `Cell` that is incremented when an `R` is
    // dropped.

    #[derive(Clone)]
    struct Foo<'a>(&'a Cell<usize>);

    impl Drop for Foo<'_> {
        fn drop(&mut self) {
            self.0.set(self.0.get() + 1);
        }
    }

    fn five(i: &Cell<usize>) -> [Foo<'_>; 5] {
        // This is somewhat verbose because `Foo` does not implement `Copy`
        // since it implements `Drop`. Consequently, we cannot write
        // `[Foo(i); 5]`.
        [Foo(i), Foo(i), Foo(i), Foo(i), Foo(i)]
    }

    // Simple: drop new iterator.
    let i = Cell::new(0);
    {
        IntoIter::new(five(&i));
    }
    assert_eq!(i.get(), 5);

    // Call `next()` once.
    let i = Cell::new(0);
    {
        let mut iter = IntoIter::new(five(&i));
        let _x = iter.next();
        // The yielded element is owned by `_x`, so nothing has dropped yet.
        assert_eq!(i.get(), 0);
        assert_eq!(iter.count(), 4);
        assert_eq!(i.get(), 4);
    }
    // `_x` drops at the end of the scope, bringing the total to 5.
    assert_eq!(i.get(), 5);

    // Check `clone` and calling `next`/`next_back`.
    let i = Cell::new(0);
    {
        let mut iter = IntoIter::new(five(&i));
        iter.next();
        assert_eq!(i.get(), 1);
        iter.next_back();
        assert_eq!(i.get(), 2);

        // Cloning copies only the three remaining elements; `Foo::clone`
        // does not touch the counter.
        let mut clone = iter.clone();
        assert_eq!(i.get(), 2);

        iter.next();
        assert_eq!(i.get(), 3);

        clone.next();
        assert_eq!(i.get(), 4);

        assert_eq!(clone.count(), 2);
        assert_eq!(i.get(), 6);
    }
    // `iter` still held 2 undropped elements when it went out of scope.
    assert_eq!(i.get(), 8);

    // Check via `nth`.
    let i = Cell::new(0);
    {
        let mut iter = IntoIter::new(five(&i));
        let _x = iter.nth(2);
        assert_eq!(i.get(), 2);
        let _y = iter.last();
        assert_eq!(i.get(), 3);
    }
    assert_eq!(i.get(), 5);

    // Check every element.
    let i = Cell::new(0);
    for (index, _x) in IntoIter::new(five(&i)).enumerate() {
        // Each prior element has been dropped by the time the next arrives.
        assert_eq!(i.get(), index);
    }
    assert_eq!(i.get(), 5);

    let i = Cell::new(0);
    for (index, _x) in IntoIter::new(five(&i)).rev().enumerate() {
        assert_eq!(i.get(), index);
    }
    assert_eq!(i.get(), 5);
}
+
// This test does not work on targets without panic=unwind support.
// To work around this problem, the test is marked as should_panic, so it will
// be automagically skipped on unsuitable targets, such as
// wasm32-unknown-unknown.
//
// It means that we use panic for indicating success.
#[test]
#[should_panic(expected = "test succeeded")]
fn array_default_impl_avoids_leaks_on_panic() {
    use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
    // Net count of live `Bomb`s: incremented on construction, decremented on
    // drop; must return to 0 after the panic unwinds.
    static COUNTER: AtomicUsize = AtomicUsize::new(0);
    #[derive(Debug)]
    struct Bomb(usize);

    impl Default for Bomb {
        fn default() -> Bomb {
            // The fourth construction attempt panics mid-array-default.
            if COUNTER.load(Relaxed) == 3 {
                panic!("bomb limit exceeded");
            }

            COUNTER.fetch_add(1, Relaxed);
            Bomb(COUNTER.load(Relaxed))
        }
    }

    impl Drop for Bomb {
        fn drop(&mut self) {
            COUNTER.fetch_sub(1, Relaxed);
        }
    }

    let res = std::panic::catch_unwind(|| <[Bomb; 5]>::default());
    let panic_msg = match res {
        Ok(_) => unreachable!(),
        Err(p) => p.downcast::<&'static str>().unwrap(),
    };
    assert_eq!(*panic_msg, "bomb limit exceeded");
    // check that all bombs are successfully dropped
    assert_eq!(COUNTER.load(Relaxed), 0);
    panic!("test succeeded")
}
+
// `[T; 0]: Default` must hold even when `T: Default` does not.
#[test]
fn empty_array_is_always_default() {
    struct NoDefault;

    let _: [NoDefault; 0] = Default::default();
}
+
// `array::map` applies the closure element-wise, preserving length.
#[test]
fn array_map() {
    // Simple arithmetic transform.
    let input = [1, 2, 3];
    assert_eq!(input.map(|x| x + 1), [2, 3, 4]);

    // The output element type may differ from the input's.
    let bytes = [1u8, 2, 3];
    let widened: [u64; 3] = bytes.map(|b| b as u64);
    assert_eq!(widened, [1, 2, 3]);
}
+
// See note on above test for why `should_panic` is used.
#[test]
#[should_panic(expected = "test succeeded")]
fn array_map_drop_safety() {
    use core::sync::atomic::AtomicUsize;
    use core::sync::atomic::Ordering;
    static DROPPED: AtomicUsize = AtomicUsize::new(0);
    struct DropCounter;
    impl Drop for DropCounter {
        fn drop(&mut self) {
            DROPPED.fetch_add(1, Ordering::SeqCst);
        }
    }

    let num_to_create = 5;
    let success = std::panic::catch_unwind(|| {
        let items = [0; 10];
        let mut nth = 0;
        // The closure panics on its 6th invocation (when `nth == 5`); the 5
        // `DropCounter`s created before that must all be dropped during the
        // unwind rather than leaked by `map`'s partially-built output.
        items.map(|_| {
            assert!(nth < num_to_create);
            nth += 1;
            DropCounter
        });
    });
    assert!(success.is_err());
    assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
    panic!("test succeeded")
}
+
// `Cell` inside arrays permits building reference cycles among stack values;
// this just needs to compile and run without violating the borrow rules.
#[test]
fn cell_allows_array_cycle() {
    use core::cell::Cell;

    #[derive(Debug)]
    struct Node<'a> {
        links: [Cell<Option<&'a Node<'a>>>; 2],
    }

    impl<'a> Node<'a> {
        fn new() -> Node<'a> {
            Node { links: [Cell::new(None), Cell::new(None)] }
        }
    }

    let n1 = Node::new();
    let n2 = Node::new();
    let n3 = Node::new();

    // Wire up a cycle, including a self-reference from n2 to itself.
    n1.links[0].set(Some(&n2));
    n1.links[1].set(Some(&n3));

    n2.links[0].set(Some(&n2));
    n2.links[1].set(Some(&n3));

    n3.links[0].set(Some(&n1));
    n3.links[1].set(Some(&n2));
}
--- /dev/null
+use core::char::from_u32;
+
+#[test]
+fn test_is_ascii() {
+ assert!(b"".is_ascii());
+ assert!(b"banana\0\x7F".is_ascii());
+ assert!(b"banana\0\x7F".iter().all(|b| b.is_ascii()));
+ assert!(!b"Vi\xe1\xbb\x87t Nam".is_ascii());
+ assert!(!b"Vi\xe1\xbb\x87t Nam".iter().all(|b| b.is_ascii()));
+ assert!(!b"\xe1\xbb\x87".iter().any(|b| b.is_ascii()));
+
+ assert!("".is_ascii());
+ assert!("banana\0\u{7F}".is_ascii());
+ assert!("banana\0\u{7F}".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii()));
+ assert!(!"ประเทศไทย中华ệ ".chars().any(|c| c.is_ascii()));
+}
+
// Only ASCII lowercase letters change; non-ASCII (ü, ı, ß) pass through.
#[test]
fn test_to_ascii_uppercase() {
    assert_eq!("url()URL()uRl()ürl".to_ascii_uppercase(), "URL()URL()URL()üRL");
    assert_eq!("hıKß".to_ascii_uppercase(), "HıKß");

    // Exhaustive check over the first 501 code points.
    for cp in 0u32..501 {
        let mapped = if (b'a' as u32..=b'z' as u32).contains(&cp) {
            cp - 32 // offset from lowercase to uppercase ASCII
        } else {
            cp
        };
        let input = from_u32(cp).unwrap().to_string();
        let expected = from_u32(mapped).unwrap().to_string();
        assert_eq!(input.to_ascii_uppercase(), expected);
    }
}
+
+#[test]
+fn test_to_ascii_lowercase() {
+ assert_eq!("url()URL()uRl()Ürl".to_ascii_lowercase(), "url()url()url()Ürl");
+ // Dotted capital I, Kelvin sign, Sharp S.
+ assert_eq!("HİKß".to_ascii_lowercase(), "hİKß");
+
+ for i in 0..501 {
+ let lower =
+ if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } else { i };
+ assert_eq!(
+ (from_u32(i).unwrap()).to_string().to_ascii_lowercase(),
+ (from_u32(lower).unwrap()).to_string()
+ );
+ }
+}
+
+#[test]
+fn test_make_ascii_lower_case() {
+ macro_rules! test {
+ ($from: expr, $to: expr) => {{
+ let mut x = $from;
+ x.make_ascii_lowercase();
+ assert_eq!(x, $to);
+ }};
+ }
+ test!(b'A', b'a');
+ test!(b'a', b'a');
+ test!(b'!', b'!');
+ test!('A', 'a');
+ test!('À', 'À');
+ test!('a', 'a');
+ test!('!', '!');
+ test!(b"H\xc3\x89".to_vec(), b"h\xc3\x89");
+ test!("HİKß".to_string(), "hİKß");
+}
+
// In-place `make_ascii_uppercase` across u8, char, Vec<u8>, and String.
#[test]
fn test_make_ascii_upper_case() {
    macro_rules! check {
        ($input:expr => $expected:expr) => {{
            let mut v = $input;
            v.make_ascii_uppercase();
            assert_eq!(v, $expected);
        }};
    }
    check!(b'a' => b'A');
    check!(b'A' => b'A');
    check!(b'!' => b'!');
    check!('a' => 'A');
    // Non-ASCII letters are left alone.
    check!('à' => 'à');
    check!('A' => 'A');
    check!('!' => '!');
    check!(b"h\xc3\xa9".to_vec() => b"H\xc3\xa9");
    check!("hıKß".to_string() => "HıKß");

    // `IndexMut` on `String` yields a `str` that can be mutated in place.
    let mut s = "Hello".to_string();
    s[..3].make_ascii_uppercase();
    assert_eq!(s, "HELlo")
}
+
+#[test]
+fn test_eq_ignore_ascii_case() {
+ assert!("url()URL()uRl()Ürl".eq_ignore_ascii_case("url()url()url()Ürl"));
+ assert!(!"Ürl".eq_ignore_ascii_case("ürl"));
+ // Dotted capital I, Kelvin sign, Sharp S.
+ assert!("HİKß".eq_ignore_ascii_case("hİKß"));
+ assert!(!"İ".eq_ignore_ascii_case("i"));
+ assert!(!"K".eq_ignore_ascii_case("k"));
+ assert!(!"ß".eq_ignore_ascii_case("s"));
+
+ for i in 0..501 {
+ let lower =
+ if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } else { i };
+ assert!(
+ (from_u32(i).unwrap())
+ .to_string()
+ .eq_ignore_ascii_case(&from_u32(lower).unwrap().to_string())
+ );
+ }
+}
+
// `String::eq_ignore_ascii_case` must accept a `&str` argument without any
// explicit type annotations; this only needs to compile.
#[test]
fn inference_works() {
    let lower = "a".to_string();
    lower.eq_ignore_ascii_case("A");
}
+
// Shorthands used by the is_ascii_* tests.
//
// `assert_all!(pred, s1, s2, ...)` asserts that `pred` holds for every
// `char` of each argument string AND for every byte of its UTF-8 form,
// keeping the `char` and `u8` implementations in agreement.
macro_rules! assert_all {
    ($what:ident, $($str:tt),+) => {{
        $(
            for b in $str.chars() {
                if !b.$what() {
                    panic!("expected {}({}) but it isn't",
                           stringify!($what), b);
                }
            }
            for b in $str.as_bytes().iter() {
                if !b.$what() {
                    panic!("expected {}(0x{:02x})) but it isn't",
                           stringify!($what), b);
                }
            }
        )+
    }};
    // Trailing-comma arm: delegate to the main arm.
    ($what:ident, $($str:tt),+,) => (assert_all!($what,$($str),+))
}
// Negative counterpart: `pred` must hold for NO char/byte of any argument.
macro_rules! assert_none {
    ($what:ident, $($str:tt),+) => {{
        $(
            for b in $str.chars() {
                if b.$what() {
                    panic!("expected not-{}({}) but it is",
                           stringify!($what), b);
                }
            }
            for b in $str.as_bytes().iter() {
                if b.$what() {
                    panic!("expected not-{}(0x{:02x})) but it is",
                           stringify!($what), b);
                }
            }
        )+
    }};
    // Trailing-comma arm: delegate to the main arm.
    ($what:ident, $($str:tt),+,) => (assert_none!($what,$($str),+))
}
+
// The following tests partition (a superset of) the ASCII range into
// positive and negative examples for each `is_ascii_*` predicate, checked
// for both `char` and `u8` via the macros above.
//
// NOTE(review): the uppercase alphabet literals below read "...NOQPR..."
// ('Q' and 'P' transposed). This is harmless — only set membership matters,
// and all 26 letters are present — but could be normalized upstream.

#[test]
fn test_is_ascii_alphabetic() {
    assert_all!(
        is_ascii_alphabetic,
        "",
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
    );
    assert_none!(
        is_ascii_alphabetic,
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_uppercase() {
    assert_all!(is_ascii_uppercase, "", "ABCDEFGHIJKLMNOQPRSTUVWXYZ",);
    assert_none!(
        is_ascii_uppercase,
        "abcdefghijklmnopqrstuvwxyz",
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_lowercase() {
    assert_all!(is_ascii_lowercase, "abcdefghijklmnopqrstuvwxyz",);
    assert_none!(
        is_ascii_lowercase,
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_alphanumeric() {
    assert_all!(
        is_ascii_alphanumeric,
        "",
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
    );
    assert_none!(
        is_ascii_alphanumeric,
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_digit() {
    assert_all!(is_ascii_digit, "", "0123456789",);
    assert_none!(
        is_ascii_digit,
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_hexdigit() {
    // Hex digits include both letter cases a-f/A-F.
    assert_all!(is_ascii_hexdigit, "", "0123456789", "abcdefABCDEF",);
    assert_none!(
        is_ascii_hexdigit,
        "ghijklmnopqrstuvwxyz",
        "GHIJKLMNOQPRSTUVWXYZ",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_punctuation() {
    assert_all!(is_ascii_punctuation, "", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",);
    assert_none!(
        is_ascii_punctuation,
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_graphic() {
    // Graphic = printable and visible: everything but whitespace and controls.
    assert_all!(
        is_ascii_graphic,
        "",
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
    );
    assert_none!(
        is_ascii_graphic,
        " \t\n\x0c\r",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_whitespace() {
    assert_all!(is_ascii_whitespace, "", " \t\n\x0c\r",);
    // The control-character rows below deliberately omit \x09, \x0a, \x0c
    // and \x0d, which ARE ASCII whitespace.
    assert_none!(
        is_ascii_whitespace,
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x0b\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
}

#[test]
fn test_is_ascii_control() {
    // Controls are 0x00-0x1f plus DEL (0x7f); space is NOT a control.
    assert_all!(
        is_ascii_control,
        "",
        "\x00\x01\x02\x03\x04\x05\x06\x07",
        "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
        "\x10\x11\x12\x13\x14\x15\x16\x17",
        "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
        "\x7f",
    );
    assert_none!(
        is_ascii_control,
        "abcdefghijklmnopqrstuvwxyz",
        "ABCDEFGHIJKLMNOQPRSTUVWXYZ",
        "0123456789",
        "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~",
        " ",
    );
}
+
// `is_ascii` does a good amount of pointer manipulation and has
// alignment-dependent computation. This is all sanity-checked via
// `debug_assert!`s, so we test various sizes/alignments thoroughly versus an
// "obviously correct" baseline function.
#[test]
fn test_is_ascii_align_size_thoroughly() {
    // The "obviously-correct" baseline mentioned above.
    fn is_ascii_baseline(s: &[u8]) -> bool {
        s.iter().all(|b| b.is_ascii())
    }

    // Helper to repeat `l` copies of `b0` followed by `l` copies of `b1`.
    fn repeat_concat(b0: u8, b1: u8, l: usize) -> Vec<u8> {
        use core::iter::repeat;
        repeat(b0).take(l).chain(repeat(b1).take(l)).collect()
    }

    // Miri is too slow
    let iter = if cfg!(miri) { 0..20 } else { 0..100 };

    for i in iter {
        // Cases cover all-ASCII, all-non-ASCII, boundary bytes (0x7f/0x80),
        // and ASCII/non-ASCII transitions in both directions.
        #[cfg(not(miri))]
        let cases = &[
            b"a".repeat(i),
            b"\0".repeat(i),
            b"\x7f".repeat(i),
            b"\x80".repeat(i),
            b"\xff".repeat(i),
            repeat_concat(b'a', 0x80u8, i),
            repeat_concat(0x80u8, b'a', i),
        ];

        #[cfg(miri)]
        let cases = &[b"a".repeat(i), b"\x80".repeat(i), repeat_concat(b'a', 0x80u8, i)];

        for case in cases {
            // Slice at every offset so head/tail alignment varies.
            for pos in 0..=case.len() {
                // Potentially misaligned head
                let prefix = &case[pos..];
                assert_eq!(is_ascii_baseline(prefix), prefix.is_ascii(),);

                // Potentially misaligned tail
                let suffix = &case[..case.len() - pos];

                assert_eq!(is_ascii_baseline(suffix), suffix.is_ascii(),);

                // Both head and tail are potentially misaligned
                let mid = &case[(pos / 2)..(case.len() - (pos / 2))];
                assert_eq!(is_ascii_baseline(mid), mid.is_ascii(),);
            }
        }
    }
}
+
// `is_ascii` on `char` and `u8` must be callable in a const context.
#[test]
fn ascii_const() {
    const CHAR_IS_ASCII: bool = 'a'.is_ascii();
    const BYTE_IS_ASCII: bool = 97u8.is_ascii();

    assert!(CHAR_IS_ASCII);
    assert!(BYTE_IS_ASCII);
}
--- /dev/null
+use core::sync::atomic::Ordering::SeqCst;
+use core::sync::atomic::*;
+
// Exercises the (deprecated, kept-for-compatibility) `compare_and_swap` on
// `AtomicBool`; it returns the PREVIOUS value, regardless of success.
#[test]
fn bool_() {
    let a = AtomicBool::new(false);
    // First CAS succeeds: false -> true, previous value was false.
    assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
    // Second CAS fails (value is already true) and reports the current true.
    assert_eq!(a.compare_and_swap(false, true, SeqCst), true);

    a.store(false, SeqCst);
    assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
}
+
// `fetch_and` returns the previous value and stores the AND result.
#[test]
fn bool_and() {
    let flag = AtomicBool::new(true);
    let previous = flag.fetch_and(false, SeqCst);
    assert!(previous);
    assert!(!flag.load(SeqCst));
}
+
// `fetch_nand` stores `!(current && operand)` and returns the previous value.
#[test]
fn bool_nand() {
    let a = AtomicBool::new(false);
    // Each row: (operand, expected previous value, expected stored value).
    for &(operand, previous, stored) in
        &[(false, false, true), (false, true, true), (true, true, false), (true, false, true)]
    {
        assert_eq!(a.fetch_nand(operand, SeqCst), previous);
        assert_eq!(a.load(SeqCst), stored);
    }
}
+
// Bitwise fetch-ops on `AtomicUsize`: each returns the previous value and
// stores the combined result.

#[test]
fn uint_and() {
    const INIT: usize = 0xf731;
    const OPERAND: usize = 0x137f;
    let val = AtomicUsize::new(INIT);
    assert_eq!(val.fetch_and(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT & OPERAND);
}

#[test]
fn uint_nand() {
    const INIT: usize = 0xf731;
    const OPERAND: usize = 0x137f;
    let val = AtomicUsize::new(INIT);
    assert_eq!(val.fetch_nand(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), !(INIT & OPERAND));
}

#[test]
fn uint_or() {
    const INIT: usize = 0xf731;
    const OPERAND: usize = 0x137f;
    let val = AtomicUsize::new(INIT);
    assert_eq!(val.fetch_or(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT | OPERAND);
}

#[test]
fn uint_xor() {
    const INIT: usize = 0xf731;
    const OPERAND: usize = 0x137f;
    let val = AtomicUsize::new(INIT);
    assert_eq!(val.fetch_xor(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT ^ OPERAND);
}
+
// Bitwise fetch-ops on `AtomicIsize`, mirroring the unsigned tests above.

#[test]
fn int_and() {
    const INIT: isize = 0xf731;
    const OPERAND: isize = 0x137f;
    let val = AtomicIsize::new(INIT);
    assert_eq!(val.fetch_and(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT & OPERAND);
}

#[test]
fn int_nand() {
    const INIT: isize = 0xf731;
    const OPERAND: isize = 0x137f;
    let val = AtomicIsize::new(INIT);
    assert_eq!(val.fetch_nand(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), !(INIT & OPERAND));
}

#[test]
fn int_or() {
    const INIT: isize = 0xf731;
    const OPERAND: isize = 0x137f;
    let val = AtomicIsize::new(INIT);
    assert_eq!(val.fetch_or(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT | OPERAND);
}

#[test]
fn int_xor() {
    const INIT: isize = 0xf731;
    const OPERAND: isize = 0x137f;
    let val = AtomicIsize::new(INIT);
    assert_eq!(val.fetch_xor(OPERAND, SeqCst), INIT);
    assert_eq!(val.load(SeqCst), INIT ^ OPERAND);
}
+
// Statics exercised by `static_init` below: both `AtomicBool` initial values
// plus zero-initialized signed/unsigned counters.
static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);
static S_UINT: AtomicUsize = AtomicUsize::new(0);

#[test]
fn static_init() {
    // Note that we're not really testing the mutability here but it's important
    // on Android at the moment (#49775)
    // NOTE(review): this test mutates the statics above, so it is only valid
    // when run once per process.
    assert!(!S_FALSE.swap(true, SeqCst));
    assert!(S_TRUE.swap(false, SeqCst));
    assert!(S_INT.fetch_add(1, SeqCst) == 0);
    assert!(S_UINT.fetch_add(1, SeqCst) == 0);
}
--- /dev/null
#[test]
fn test_bool_to_option() {
    // `then_some` / `then` map `true` to `Some` and `false` to `None`.
    for flag in [false, true] {
        let expected = if flag { Some(0) } else { None };
        assert_eq!(flag.then_some(0), expected);
        assert_eq!(flag.then(|| 0), expected);
    }
}
--- /dev/null
+use core::cell::*;
+use core::default::Default;
+use std::mem::drop;
+
#[test]
fn smoketest_cell() {
    // get/set round-trips, plus `Cell`'s `PartialEq` comparing inner values.
    let cell = Cell::new(10);
    assert_eq!(cell.get(), 10);
    assert_eq!(cell, Cell::new(10));
    cell.set(20);
    assert_eq!(cell.get(), 20);
    assert_eq!(cell, Cell::new(20));

    // Also works with compound Copy contents such as tuples.
    let pair = Cell::new((30, 40));
    assert_eq!(pair.get(), (30, 40));
    assert_eq!(pair, Cell::new((30, 40)));
}
+
+#[test]
+fn cell_update() {
+ let x = Cell::new(10);
+
+ assert_eq!(x.update(|x| x + 5), 15);
+ assert_eq!(x.get(), 15);
+
+ assert_eq!(x.update(|x| x / 3), 5);
+ assert_eq!(x.get(), 5);
+}
+
+#[test]
+fn cell_has_sensible_show() {
+ let x = Cell::new("foo bar");
+ assert!(format!("{:?}", x).contains(x.get()));
+
+ x.set("baz qux");
+ assert!(format!("{:?}", x).contains(x.get()));
+}
+
+#[test]
+fn ref_and_refmut_have_sensible_show() {
+ let refcell = RefCell::new("foo");
+
+ let refcell_refmut = refcell.borrow_mut();
+ assert!(format!("{:?}", refcell_refmut).contains("foo"));
+ drop(refcell_refmut);
+
+ let refcell_ref = refcell.borrow();
+ assert!(format!("{:?}", refcell_ref).contains("foo"));
+ drop(refcell_ref);
+}
+
+#[test]
+fn double_imm_borrow() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ x.borrow();
+}
+
+#[test]
+fn no_mut_then_imm_borrow() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+}
+
+#[test]
+fn no_imm_then_borrow_mut() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ assert!(x.try_borrow_mut().is_err());
+}
+
+#[test]
+fn no_double_borrow_mut() {
+ let x = RefCell::new(0);
+ assert!(x.try_borrow().is_ok());
+ let _b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+}
+
+#[test]
+fn imm_release_borrow_mut() {
+ let x = RefCell::new(0);
+ {
+ let _b1 = x.borrow();
+ }
+ x.borrow_mut();
+}
+
+#[test]
+fn mut_release_borrow_mut() {
+ let x = RefCell::new(0);
+ {
+ let _b1 = x.borrow_mut();
+ }
+ x.borrow();
+}
+
+#[test]
+fn double_borrow_single_release_no_borrow_mut() {
+ let x = RefCell::new(0);
+ let _b1 = x.borrow();
+ {
+ let _b2 = x.borrow();
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+}
+
+#[test]
+#[should_panic]
+fn discard_doesnt_unborrow() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ // Binding a place to the `_` pattern neither moves nor drops it, so the
+ // shared borrow taken above stays live past this statement.
+ let _ = _b;
+ // The cell is still immutably borrowed, so `borrow_mut` must panic.
+ let _b = x.borrow_mut();
+}
+
+#[test]
+fn ref_clone_updates_flag() {
+ let x = RefCell::new(0);
+ {
+ let b1 = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let _b2 = Ref::clone(&b1);
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
+
+#[test]
+fn ref_map_does_not_update_flag() {
+ let x = RefCell::new(Some(5));
+ {
+ let b1: Ref<'_, Option<u32>> = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let b2: Ref<'_, u32> = Ref::map(b1, |o| o.as_ref().unwrap());
+ assert_eq!(*b2, 5);
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
+
+#[test]
+fn ref_map_split_updates_flag() {
+ let x = RefCell::new([1, 2]);
+ {
+ let b1 = x.borrow();
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let (_b2, _b3) = Ref::map_split(b1, |slc| slc.split_at(1));
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+
+ {
+ let b1 = x.borrow_mut();
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ {
+ let (_b2, _b3) = RefMut::map_split(b1, |slc| slc.split_at_mut(1));
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ drop(_b2);
+ assert!(x.try_borrow().is_err());
+ assert!(x.try_borrow_mut().is_err());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+ }
+ assert!(x.try_borrow().is_ok());
+ assert!(x.try_borrow_mut().is_ok());
+}
+
+#[test]
+fn ref_map_split() {
+ let x = RefCell::new([1, 2]);
+ let (b1, b2) = Ref::map_split(x.borrow(), |slc| slc.split_at(1));
+ assert_eq!(*b1, [1]);
+ assert_eq!(*b2, [2]);
+}
+
+#[test]
+fn ref_mut_map_split() {
+ let x = RefCell::new([1, 2]);
+ {
+ let (mut b1, mut b2) = RefMut::map_split(x.borrow_mut(), |slc| slc.split_at_mut(1));
+ assert_eq!(*b1, [1]);
+ assert_eq!(*b2, [2]);
+ b1[0] = 2;
+ b2[0] = 1;
+ }
+ assert_eq!(*x.borrow(), [2, 1]);
+}
+
+#[test]
+fn ref_map_accessor() {
+ struct X(RefCell<(u32, char)>);
+ impl X {
+ fn accessor(&self) -> Ref<'_, u32> {
+ Ref::map(self.0.borrow(), |tuple| &tuple.0)
+ }
+ }
+ let x = X(RefCell::new((7, 'z')));
+ let d: Ref<'_, u32> = x.accessor();
+ assert_eq!(*d, 7);
+}
+
+#[test]
+fn ref_mut_map_accessor() {
+ struct X(RefCell<(u32, char)>);
+ impl X {
+ fn accessor(&self) -> RefMut<'_, u32> {
+ RefMut::map(self.0.borrow_mut(), |tuple| &mut tuple.0)
+ }
+ }
+ let x = X(RefCell::new((7, 'z')));
+ {
+ let mut d: RefMut<'_, u32> = x.accessor();
+ assert_eq!(*d, 7);
+ *d += 1;
+ }
+ assert_eq!(*x.0.borrow(), (8, 'z'));
+}
+
+#[test]
+fn as_ptr() {
+ let c1: Cell<usize> = Cell::new(0);
+ c1.set(1);
+ assert_eq!(1, unsafe { *c1.as_ptr() });
+
+ let c2: Cell<usize> = Cell::new(0);
+ unsafe {
+ *c2.as_ptr() = 1;
+ }
+ assert_eq!(1, c2.get());
+
+ let r1: RefCell<usize> = RefCell::new(0);
+ *r1.borrow_mut() = 1;
+ assert_eq!(1, unsafe { *r1.as_ptr() });
+
+ let r2: RefCell<usize> = RefCell::new(0);
+ unsafe {
+ *r2.as_ptr() = 1;
+ }
+ assert_eq!(1, *r2.borrow());
+}
+
+#[test]
+fn cell_default() {
+ let cell: Cell<u32> = Default::default();
+ assert_eq!(0, cell.get());
+}
+
+#[test]
+fn cell_set() {
+ let cell = Cell::new(10);
+ cell.set(20);
+ assert_eq!(20, cell.get());
+
+ let cell = Cell::new("Hello".to_owned());
+ cell.set("World".to_owned());
+ assert_eq!("World".to_owned(), cell.into_inner());
+}
+
#[test]
fn cell_replace() {
    // `replace` stores the new value and hands back the previous one.
    let cell = Cell::new(10);
    let old = cell.replace(20);
    assert_eq!(old, 10);
    assert_eq!(cell.get(), 20);

    // Also works for non-Copy contents; no cloning is involved.
    let cell = Cell::new(String::from("Hello"));
    let old = cell.replace(String::from("World"));
    assert_eq!(old, "Hello");
    assert_eq!(cell.into_inner(), "World");
}
+
#[test]
fn cell_into_inner() {
    // `into_inner` consumes the Cell and yields the wrapped value,
    // for Copy and non-Copy contents alike.
    let copy_cell = Cell::new(10);
    assert_eq!(copy_cell.into_inner(), 10);

    let owned_cell = Cell::new(String::from("Hello world"));
    assert_eq!(owned_cell.into_inner(), "Hello world");
}
+
+#[test]
+fn cell_exterior() {
+ #[derive(Copy, Clone)]
+ #[allow(dead_code)]
+ struct Point {
+ x: isize,
+ y: isize,
+ z: isize,
+ }
+
+ fn f(p: &Cell<Point>) {
+ assert_eq!(p.get().z, 12);
+ p.set(Point { x: 10, y: 11, z: 13 });
+ assert_eq!(p.get().z, 13);
+ }
+
+ let a = Point { x: 10, y: 11, z: 12 };
+ let b = &Cell::new(a);
+ assert_eq!(b.get().z, 12);
+ f(b);
+ assert_eq!(a.z, 12);
+ assert_eq!(b.get().z, 13);
+}
+
+#[test]
+fn cell_does_not_clone() {
+ #[derive(Copy)]
+ #[allow(dead_code)]
+ struct Foo {
+ x: isize,
+ }
+
+ impl Clone for Foo {
+ fn clone(&self) -> Foo {
+ // Using Cell in any way should never cause clone() to be
+ // invoked -- after all, that would permit evil user code to
+ // abuse `Cell` and trigger crashes.
+
+ panic!();
+ }
+ }
+
+ let x = Cell::new(Foo { x: 22 });
+ let _y = x.get();
+ let _z = x.clone();
+}
+
+#[test]
+fn refcell_default() {
+ let cell: RefCell<u64> = Default::default();
+ assert_eq!(0, *cell.borrow());
+}
+
+#[test]
+fn unsafe_cell_unsized() {
+ let cell: &UnsafeCell<[i32]> = &UnsafeCell::new([1, 2, 3]);
+ {
+ let val: &mut [i32] = unsafe { &mut *cell.get() };
+ val[0] = 4;
+ val[2] = 5;
+ }
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ assert_eq!(unsafe { &mut *cell.get() }, comp);
+}
+
+#[test]
+fn refcell_unsized() {
+ let cell: &RefCell<[i32]> = &RefCell::new([1, 2, 3]);
+ {
+ let b = &mut *cell.borrow_mut();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ assert_eq!(&*cell.borrow(), comp);
+}
+
+#[test]
+fn refcell_ref_coercion() {
+ let cell: RefCell<[i32; 3]> = RefCell::new([1, 2, 3]);
+ {
+ let mut cellref: RefMut<'_, [i32; 3]> = cell.borrow_mut();
+ cellref[0] = 4;
+ let mut coerced: RefMut<'_, [i32]> = cellref;
+ coerced[2] = 5;
+ }
+ {
+ let comp: &mut [i32] = &mut [4, 2, 5];
+ let cellref: Ref<'_, [i32; 3]> = cell.borrow();
+ assert_eq!(&*cellref, comp);
+ let coerced: Ref<'_, [i32]> = cellref;
+ assert_eq!(&*coerced, comp);
+ }
+}
+
+#[test]
+#[should_panic]
+fn refcell_swap_borrows() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ let y = RefCell::new(1);
+ x.swap(&y);
+}
+
+#[test]
+#[should_panic]
+fn refcell_replace_borrows() {
+ let x = RefCell::new(0);
+ let _b = x.borrow();
+ x.replace(1);
+}
+
+#[test]
+fn refcell_format() {
+ let name = RefCell::new("rust");
+ let what = RefCell::new("rocks");
+ let msg = format!("{name} {}", &*what.borrow(), name = &*name.borrow());
+ assert_eq!(msg, "rust rocks".to_string());
+}
+
+#[allow(dead_code)]
+fn const_cells() {
+ const UNSAFE_CELL: UnsafeCell<i32> = UnsafeCell::new(3);
+ const _: i32 = UNSAFE_CELL.into_inner();
+
+ const REF_CELL: RefCell<i32> = RefCell::new(3);
+ const _: i32 = REF_CELL.into_inner();
+
+ const CELL: Cell<i32> = Cell::new(3);
+ const _: i32 = CELL.into_inner();
+}
--- /dev/null
+use std::convert::TryFrom;
+use std::str::FromStr;
+use std::{char, str};
+
+#[test]
+fn test_convert() {
+ assert_eq!(u32::from('a'), 0x61);
+ assert_eq!(char::from(b'\0'), '\0');
+ assert_eq!(char::from(b'a'), 'a');
+ assert_eq!(char::from(b'\xFF'), '\u{FF}');
+ assert_eq!(char::try_from(0_u32), Ok('\0'));
+ assert_eq!(char::try_from(0x61_u32), Ok('a'));
+ assert_eq!(char::try_from(0xD7FF_u32), Ok('\u{D7FF}'));
+ assert!(char::try_from(0xD800_u32).is_err());
+ assert!(char::try_from(0xDFFF_u32).is_err());
+ assert_eq!(char::try_from(0xE000_u32), Ok('\u{E000}'));
+ assert_eq!(char::try_from(0x10FFFF_u32), Ok('\u{10FFFF}'));
+ assert!(char::try_from(0x110000_u32).is_err());
+ assert!(char::try_from(0xFFFF_FFFF_u32).is_err());
+}
+
+#[test]
+fn test_from_str() {
+ assert_eq!(char::from_str("a").unwrap(), 'a');
+ assert_eq!(char::from_str("\0").unwrap(), '\0');
+ assert_eq!(char::from_str("\u{D7FF}").unwrap(), '\u{d7FF}');
+ assert!(char::from_str("").is_err());
+ assert!(char::from_str("abc").is_err());
+}
+
+#[test]
+fn test_is_lowercase() {
+ assert!('a'.is_lowercase());
+ assert!('ö'.is_lowercase());
+ assert!('ß'.is_lowercase());
+ assert!(!'Ü'.is_lowercase());
+ assert!(!'P'.is_lowercase());
+}
+
+#[test]
+fn test_is_uppercase() {
+ assert!(!'h'.is_uppercase());
+ assert!(!'ä'.is_uppercase());
+ assert!(!'ß'.is_uppercase());
+ assert!('Ö'.is_uppercase());
+ assert!('T'.is_uppercase());
+}
+
+#[test]
+fn test_is_whitespace() {
+ assert!(' '.is_whitespace());
+ assert!('\u{2007}'.is_whitespace());
+ assert!('\t'.is_whitespace());
+ assert!('\n'.is_whitespace());
+ assert!(!'a'.is_whitespace());
+ assert!(!'_'.is_whitespace());
+ assert!(!'\u{0}'.is_whitespace());
+}
+
#[test]
fn test_to_digit() {
    // (character, radix, expected digit) triples that must all convert.
    let valid: &[(char, u32, u32)] = &[
        ('0', 10, 0),
        ('1', 2, 1),
        ('2', 3, 2),
        ('9', 10, 9),
        ('a', 16, 10),
        ('A', 16, 10),
        ('b', 16, 11),
        ('B', 16, 11),
        ('z', 36, 35),
        ('Z', 36, 35),
    ];
    for &(c, radix, digit) in valid {
        assert_eq!(c.to_digit(radix), Some(digit));
    }

    // Characters that are not digits in the given radix yield `None`.
    assert_eq!(' '.to_digit(10), None);
    assert_eq!('$'.to_digit(36), None);
}
+
+#[test]
+fn test_to_lowercase() {
+ fn lower(c: char) -> String {
+ let to_lowercase = c.to_lowercase();
+ assert_eq!(to_lowercase.len(), to_lowercase.count());
+ let iter: String = c.to_lowercase().collect();
+ let disp: String = c.to_lowercase().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(lower('A'), "a");
+ assert_eq!(lower('Ö'), "ö");
+ assert_eq!(lower('ß'), "ß");
+ assert_eq!(lower('Ü'), "ü");
+ assert_eq!(lower('💩'), "💩");
+ assert_eq!(lower('Σ'), "σ");
+ assert_eq!(lower('Τ'), "τ");
+ assert_eq!(lower('Ι'), "ι");
+ assert_eq!(lower('Γ'), "γ");
+ assert_eq!(lower('Μ'), "μ");
+ assert_eq!(lower('Α'), "α");
+ assert_eq!(lower('Σ'), "σ");
+ assert_eq!(lower('Dž'), "dž");
+ assert_eq!(lower('fi'), "fi");
+ assert_eq!(lower('İ'), "i\u{307}");
+}
+
+#[test]
+fn test_to_uppercase() {
+ fn upper(c: char) -> String {
+ let to_uppercase = c.to_uppercase();
+ assert_eq!(to_uppercase.len(), to_uppercase.count());
+ let iter: String = c.to_uppercase().collect();
+ let disp: String = c.to_uppercase().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(upper('a'), "A");
+ assert_eq!(upper('ö'), "Ö");
+ assert_eq!(upper('ß'), "SS"); // not ẞ: Latin capital letter sharp s
+ assert_eq!(upper('ü'), "Ü");
+ assert_eq!(upper('💩'), "💩");
+
+ assert_eq!(upper('σ'), "Σ");
+ assert_eq!(upper('τ'), "Τ");
+ assert_eq!(upper('ι'), "Ι");
+ assert_eq!(upper('γ'), "Γ");
+ assert_eq!(upper('μ'), "Μ");
+ assert_eq!(upper('α'), "Α");
+ assert_eq!(upper('ς'), "Σ");
+ assert_eq!(upper('Dž'), "DŽ");
+ assert_eq!(upper('fi'), "FI");
+ assert_eq!(upper('ᾀ'), "ἈΙ");
+}
+
+#[test]
+fn test_is_control() {
+ assert!('\u{0}'.is_control());
+ assert!('\u{3}'.is_control());
+ assert!('\u{6}'.is_control());
+ assert!('\u{9}'.is_control());
+ assert!('\u{7f}'.is_control());
+ assert!('\u{92}'.is_control());
+ assert!(!'\u{20}'.is_control());
+ assert!(!'\u{55}'.is_control());
+ assert!(!'\u{68}'.is_control());
+}
+
+#[test]
+fn test_is_numeric() {
+ assert!('2'.is_numeric());
+ assert!('7'.is_numeric());
+ assert!('¾'.is_numeric());
+ assert!(!'c'.is_numeric());
+ assert!(!'i'.is_numeric());
+ assert!(!'z'.is_numeric());
+ assert!(!'Q'.is_numeric());
+}
+
+#[test]
+fn test_escape_debug() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_debug().collect();
+ let disp: String = c.escape_debug().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(string('\n'), "\\n");
+ assert_eq!(string('\r'), "\\r");
+ assert_eq!(string('\''), "\\'");
+ assert_eq!(string('"'), "\\\"");
+ assert_eq!(string(' '), " ");
+ assert_eq!(string('a'), "a");
+ assert_eq!(string('~'), "~");
+ assert_eq!(string('é'), "é");
+ assert_eq!(string('文'), "文");
+ assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\x1f'), "\\u{1f}");
+ assert_eq!(string('\x7f'), "\\u{7f}");
+ assert_eq!(string('\u{80}'), "\\u{80}");
+ assert_eq!(string('\u{ff}'), "\u{ff}");
+ assert_eq!(string('\u{11b}'), "\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\u{1d4b6}");
+ assert_eq!(string('\u{301}'), "\\u{301}"); // combining character
+ assert_eq!(string('\u{200b}'), "\\u{200b}"); // zero width space
+ assert_eq!(string('\u{e000}'), "\\u{e000}"); // private use 1
+ assert_eq!(string('\u{100000}'), "\\u{100000}"); // private use 2
+}
+
+#[test]
+fn test_escape_default() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_default().collect();
+ let disp: String = c.escape_default().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+ assert_eq!(string('\n'), "\\n");
+ assert_eq!(string('\r'), "\\r");
+ assert_eq!(string('\''), "\\'");
+ assert_eq!(string('"'), "\\\"");
+ assert_eq!(string(' '), " ");
+ assert_eq!(string('a'), "a");
+ assert_eq!(string('~'), "~");
+ assert_eq!(string('é'), "\\u{e9}");
+ assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\x1f'), "\\u{1f}");
+ assert_eq!(string('\x7f'), "\\u{7f}");
+ assert_eq!(string('\u{80}'), "\\u{80}");
+ assert_eq!(string('\u{ff}'), "\\u{ff}");
+ assert_eq!(string('\u{11b}'), "\\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\\u{1d4b6}");
+ assert_eq!(string('\u{200b}'), "\\u{200b}"); // zero width space
+ assert_eq!(string('\u{e000}'), "\\u{e000}"); // private use 1
+ assert_eq!(string('\u{100000}'), "\\u{100000}"); // private use 2
+}
+
+#[test]
+fn test_escape_unicode() {
+ fn string(c: char) -> String {
+ let iter: String = c.escape_unicode().collect();
+ let disp: String = c.escape_unicode().to_string();
+ assert_eq!(iter, disp);
+ iter
+ }
+
+ assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\n'), "\\u{a}");
+ assert_eq!(string(' '), "\\u{20}");
+ assert_eq!(string('a'), "\\u{61}");
+ assert_eq!(string('\u{11b}'), "\\u{11b}");
+ assert_eq!(string('\u{1d4b6}'), "\\u{1d4b6}");
+}
+
+#[test]
+fn test_encode_utf8() {
+ fn check(input: char, expect: &[u8]) {
+ let mut buf = [0; 4];
+ let ptr = buf.as_ptr();
+ let s = input.encode_utf8(&mut buf);
+ assert_eq!(s.as_ptr() as usize, ptr as usize);
+ assert!(str::from_utf8(s.as_bytes()).is_ok());
+ assert_eq!(s.as_bytes(), expect);
+ }
+
+ check('x', &[0x78]);
+ check('\u{e9}', &[0xc3, 0xa9]);
+ check('\u{a66e}', &[0xea, 0x99, 0xae]);
+ check('\u{1f4a9}', &[0xf0, 0x9f, 0x92, 0xa9]);
+}
+
+#[test]
+fn test_encode_utf16() {
+ fn check(input: char, expect: &[u16]) {
+ let mut buf = [0; 2];
+ let ptr = buf.as_mut_ptr();
+ let b = input.encode_utf16(&mut buf);
+ assert_eq!(b.as_mut_ptr() as usize, ptr as usize);
+ assert_eq!(b, expect);
+ }
+
+ check('x', &[0x0078]);
+ check('\u{e9}', &[0x00e9]);
+ check('\u{a66e}', &[0xa66e]);
+ check('\u{1f4a9}', &[0xd83d, 0xdca9]);
+}
+
#[test]
fn test_len_utf16() {
    // BMP scalar values take one UTF-16 code unit; characters beyond the BMP
    // take a surrogate pair. `assert_eq!` (rather than `assert!(a == b)`)
    // reports both operands on failure.
    assert_eq!('x'.len_utf16(), 1);
    assert_eq!('\u{e9}'.len_utf16(), 1);
    assert_eq!('\u{a66e}'.len_utf16(), 1);
    assert_eq!('\u{1f4a9}'.len_utf16(), 2);
}
+
+#[test]
+fn test_decode_utf16() {
+ fn check(s: &[u16], expected: &[Result<char, u16>]) {
+ let v = char::decode_utf16(s.iter().cloned())
+ .map(|r| r.map_err(|e| e.unpaired_surrogate()))
+ .collect::<Vec<_>>();
+ assert_eq!(v, expected);
+ }
+ check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]);
+ check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]);
+}
+
+#[test]
+fn ed_iterator_specializations() {
+ // Check counting
+ assert_eq!('\n'.escape_default().count(), 2);
+ assert_eq!('c'.escape_default().count(), 1);
+ assert_eq!(' '.escape_default().count(), 1);
+ assert_eq!('\\'.escape_default().count(), 2);
+ assert_eq!('\''.escape_default().count(), 2);
+
+ // Check nth
+
+ // Check that OoB is handled correctly
+ assert_eq!('\n'.escape_default().nth(2), None);
+ assert_eq!('c'.escape_default().nth(1), None);
+ assert_eq!(' '.escape_default().nth(1), None);
+ assert_eq!('\\'.escape_default().nth(2), None);
+ assert_eq!('\''.escape_default().nth(2), None);
+
+ // Check the first char
+ assert_eq!('\n'.escape_default().nth(0), Some('\\'));
+ assert_eq!('c'.escape_default().nth(0), Some('c'));
+ assert_eq!(' '.escape_default().nth(0), Some(' '));
+ assert_eq!('\\'.escape_default().nth(0), Some('\\'));
+ assert_eq!('\''.escape_default().nth(0), Some('\\'));
+
+ // Check the second char
+ assert_eq!('\n'.escape_default().nth(1), Some('n'));
+ assert_eq!('\\'.escape_default().nth(1), Some('\\'));
+ assert_eq!('\''.escape_default().nth(1), Some('\''));
+
+ // Check the last char
+ assert_eq!('\n'.escape_default().last(), Some('n'));
+ assert_eq!('c'.escape_default().last(), Some('c'));
+ assert_eq!(' '.escape_default().last(), Some(' '));
+ assert_eq!('\\'.escape_default().last(), Some('\\'));
+ assert_eq!('\''.escape_default().last(), Some('\''));
+}
+
+#[test]
+fn eu_iterator_specializations() {
+ fn check(c: char) {
+ let len = c.escape_unicode().count();
+
+ // Check OoB
+ assert_eq!(c.escape_unicode().nth(len), None);
+
+ // For all possible in-bound offsets
+ let mut iter = c.escape_unicode();
+ for offset in 0..len {
+ // Check last
+ assert_eq!(iter.clone().last(), Some('}'));
+
+ // Check len
+ assert_eq!(iter.len(), len - offset);
+
+ // Check size_hint (= len in ExactSizeIterator)
+ assert_eq!(iter.size_hint(), (iter.len(), Some(iter.len())));
+
+ // Check counting
+ assert_eq!(iter.clone().count(), len - offset);
+
+ // Check nth
+ assert_eq!(c.escape_unicode().nth(offset), iter.next());
+ }
+
+ // Check post-last
+ assert_eq!(iter.clone().last(), None);
+ assert_eq!(iter.clone().count(), 0);
+ }
+
+ check('\u{0}');
+ check('\u{1}');
+ check('\u{12}');
+ check('\u{123}');
+ check('\u{1234}');
+ check('\u{12340}');
+ check('\u{10FFFF}');
+}
--- /dev/null
#[test]
fn test_borrowed_clone() {
    // Calling `clone` through a `&&i32` exercises `Clone` for `&T`, which
    // simply copies the reference — no `i32` is duplicated here.
    let value = 5;
    let borrow: &i32 = &value;
    let cloned: &i32 = (&borrow).clone();
    assert_eq!(*cloned, 5);
}
+
#[test]
fn test_clone_from() {
    // `clone_from` overwrites `b`'s pointee with `a`'s, reusing `b`'s
    // existing allocation instead of building a fresh Box.
    // (`Box::new` replaces the old unstable `box` placement syntax, which
    // was removed from the language.)
    let a = Box::new(5);
    let mut b = Box::new(10);
    b.clone_from(&a);
    assert_eq!(*b, 5);
}
--- /dev/null
+use core::cmp::{
+ self,
+ Ordering::{self, *},
+};
+
+#[test]
+fn test_int_totalord() {
+ assert_eq!(5.cmp(&10), Less);
+ assert_eq!(10.cmp(&5), Greater);
+ assert_eq!(5.cmp(&5), Equal);
+ assert_eq!((-5).cmp(&12), Less);
+ assert_eq!(12.cmp(&-5), Greater);
+}
+
+#[test]
+fn test_bool_totalord() {
+ assert_eq!(true.cmp(&false), Greater);
+ assert_eq!(false.cmp(&true), Less);
+ assert_eq!(true.cmp(&true), Equal);
+ assert_eq!(false.cmp(&false), Equal);
+}
+
+#[test]
+fn test_mut_int_totalord() {
+ assert_eq!((&mut 5).cmp(&&mut 10), Less);
+ assert_eq!((&mut 10).cmp(&&mut 5), Greater);
+ assert_eq!((&mut 5).cmp(&&mut 5), Equal);
+ assert_eq!((&mut -5).cmp(&&mut 12), Less);
+ assert_eq!((&mut 12).cmp(&&mut -5), Greater);
+}
+
#[test]
fn test_ord_max_min() {
    // `Ord::max` / `Ord::min` on integers, including the tied case.
    for (a, b, lo, hi) in [(1, 2, 1, 2), (2, 1, 1, 2), (1, 1, 1, 1)] {
        assert_eq!(a.max(b), hi);
        assert_eq!(a.min(b), lo);
    }
}
+
+#[test]
+fn test_ord_min_max_by() {
+ let f = |x: &i32, y: &i32| x.abs().cmp(&y.abs());
+ assert_eq!(cmp::min_by(1, -1, f), 1);
+ assert_eq!(cmp::min_by(1, -2, f), 1);
+ assert_eq!(cmp::min_by(2, -1, f), -1);
+ assert_eq!(cmp::max_by(1, -1, f), -1);
+ assert_eq!(cmp::max_by(1, -2, f), -2);
+ assert_eq!(cmp::max_by(2, -1, f), 2);
+}
+
+#[test]
+fn test_ord_min_max_by_key() {
+ let f = |x: &i32| x.abs();
+ assert_eq!(cmp::min_by_key(1, -1, f), 1);
+ assert_eq!(cmp::min_by_key(1, -2, f), 1);
+ assert_eq!(cmp::min_by_key(2, -1, f), -1);
+ assert_eq!(cmp::max_by_key(1, -1, f), -1);
+ assert_eq!(cmp::max_by_key(1, -2, f), -2);
+ assert_eq!(cmp::max_by_key(2, -1, f), 2);
+}
+
#[test]
fn test_ordering_reverse() {
    // `reverse` swaps Less and Greater and leaves Equal unchanged.
    for (ord, flipped) in [(Less, Greater), (Equal, Equal), (Greater, Less)] {
        assert_eq!(ord.reverse(), flipped);
    }
}
+
+#[test]
+fn test_ordering_order() {
+ assert!(Less < Equal);
+ assert_eq!(Greater.cmp(&Less), Greater);
+}
+
+#[test]
+fn test_ordering_then() {
+ assert_eq!(Equal.then(Less), Less);
+ assert_eq!(Equal.then(Equal), Equal);
+ assert_eq!(Equal.then(Greater), Greater);
+ assert_eq!(Less.then(Less), Less);
+ assert_eq!(Less.then(Equal), Less);
+ assert_eq!(Less.then(Greater), Less);
+ assert_eq!(Greater.then(Less), Greater);
+ assert_eq!(Greater.then(Equal), Greater);
+ assert_eq!(Greater.then(Greater), Greater);
+}
+
+#[test]
+fn test_ordering_then_with() {
+ assert_eq!(Equal.then_with(|| Less), Less);
+ assert_eq!(Equal.then_with(|| Equal), Equal);
+ assert_eq!(Equal.then_with(|| Greater), Greater);
+ assert_eq!(Less.then_with(|| Less), Less);
+ assert_eq!(Less.then_with(|| Equal), Less);
+ assert_eq!(Less.then_with(|| Greater), Less);
+ assert_eq!(Greater.then_with(|| Less), Greater);
+ assert_eq!(Greater.then_with(|| Equal), Greater);
+ assert_eq!(Greater.then_with(|| Greater), Greater);
+}
+
+#[test]
+fn test_user_defined_eq() {
+ // Our type.
+ struct SketchyNum {
+ num: isize,
+ }
+
+ // Our implementation of `PartialEq` to support `==` and `!=`.
+ impl PartialEq for SketchyNum {
+ // Our custom eq allows numbers which are near each other to be equal! :D
+ fn eq(&self, other: &SketchyNum) -> bool {
+ (self.num - other.num).abs() < 5
+ }
+ }
+
+ // Now these binary operators will work when applied!
+ assert!(SketchyNum { num: 37 } == SketchyNum { num: 34 });
+ assert!(SketchyNum { num: 25 } != SketchyNum { num: 57 });
+}
+
+#[test]
+fn ordering_const() {
+ // test that the methods of `Ordering` are usable in a const context
+
+ const ORDERING: Ordering = Greater;
+
+ const REVERSE: Ordering = ORDERING.reverse();
+ assert_eq!(REVERSE, Less);
+
+ const THEN: Ordering = Equal.then(ORDERING);
+ assert_eq!(THEN, Greater);
+}
--- /dev/null
+mod debug_struct {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").finish()
+ }
+ }
+
+ assert_eq!("Foo", format!("{:?}", Foo));
+ assert_eq!("Foo", format!("{:#?}", Foo));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").field("bar", &true).finish()
+ }
+ }
+
+ assert_eq!("Foo { bar: true }", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo {
+ bar: true,
+}",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ assert_eq!("Foo { bar: true, baz: 10/20 }", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo {
+ bar: true,
+ baz: 10/20,
+}",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Bar").field("foo", &Foo).field("hello", &"world").finish()
+ }
+ }
+
+ assert_eq!(
+ "Bar { foo: Foo { bar: true, baz: 10/20 }, hello: \"world\" }",
+ format!("{:?}", Bar)
+ );
+ assert_eq!(
+ "Bar {
+ foo: Foo {
+ bar: true,
+ baz: 10/20,
+ },
+ hello: \"world\",
+}",
+ format!("{:#?}", Bar)
+ );
+ }
+
+ #[test]
+ fn test_only_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo").finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!("Foo { .. }", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo {
+ ..
+}",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_multiple_and_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!("Foo { bar: true, baz: 10/20, .. }", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo {
+ bar: true,
+ baz: 10/20,
+ ..
+}",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_nested_non_exhaustive() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Foo")
+ .field("bar", &true)
+ .field("baz", &format_args!("{}/{}", 10, 20))
+ .finish_non_exhaustive()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("Bar")
+ .field("foo", &Foo)
+ .field("hello", &"world")
+ .finish_non_exhaustive()
+ }
+ }
+
+ assert_eq!(
+ "Bar { foo: Foo { bar: true, baz: 10/20, .. }, hello: \"world\", .. }",
+ format!("{:?}", Bar)
+ );
+ assert_eq!(
+ "Bar {
+ foo: Foo {
+ bar: true,
+ baz: 10/20,
+ ..
+ },
+ hello: \"world\",
+ ..
+}",
+ format!("{:#?}", Bar)
+ );
+ }
+}
+
+mod debug_tuple {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").finish()
+ }
+ }
+
+ assert_eq!("Foo", format!("{:?}", Foo));
+ assert_eq!("Foo", format!("{:#?}", Foo));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).finish()
+ }
+ }
+
+ assert_eq!("Foo(true)", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo(
+ true,
+)",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).field(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ assert_eq!("Foo(true, 10/20)", format!("{:?}", Foo));
+ assert_eq!(
+ "Foo(
+ true,
+ 10/20,
+)",
+ format!("{:#?}", Foo)
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Foo").field(&true).field(&format_args!("{}/{}", 10, 20)).finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_tuple("Bar").field(&Foo).field(&"world").finish()
+ }
+ }
+
+ assert_eq!("Bar(Foo(true, 10/20), \"world\")", format!("{:?}", Bar));
+ assert_eq!(
+ "Bar(
+ Foo(
+ true,
+ 10/20,
+ ),
+ \"world\",
+)",
+ format!("{:#?}", Bar)
+ );
+ }
+}
+
+mod debug_map {
+ use std::fmt;
+
+ #[test]
+ fn test_empty() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().finish()
+ }
+ }
+
+ assert_eq!("{}", format!("{:?}", Foo));
+ assert_eq!("{}", format!("{:#?}", Foo));
+ }
+
+ #[test]
+ fn test_single() {
+ struct Entry;
+
+ impl fmt::Debug for Entry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entry(&"bar", &true).finish()
+ }
+ }
+
+ struct KeyValue;
+
+ impl fmt::Debug for KeyValue {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").value(&true).finish()
+ }
+ }
+
+ assert_eq!(format!("{:?}", Entry), format!("{:?}", KeyValue));
+ assert_eq!(format!("{:#?}", Entry), format!("{:#?}", KeyValue));
+
+ assert_eq!("{\"bar\": true}", format!("{:?}", Entry));
+ assert_eq!(
+ "{
+ \"bar\": true,
+}",
+ format!("{:#?}", Entry)
+ );
+ }
+
+ #[test]
+ fn test_multiple() {
+ struct Entry;
+
+ impl fmt::Debug for Entry {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .entry(&"bar", &true)
+ .entry(&10, &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct KeyValue;
+
+ impl fmt::Debug for KeyValue {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .key(&"bar")
+ .value(&true)
+ .key(&10)
+ .value(&format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ assert_eq!(format!("{:?}", Entry), format!("{:?}", KeyValue));
+ assert_eq!(format!("{:#?}", Entry), format!("{:#?}", KeyValue));
+
+ assert_eq!("{\"bar\": true, 10: 10/20}", format!("{:?}", Entry));
+ assert_eq!(
+ "{
+ \"bar\": true,
+ 10: 10/20,
+}",
+ format!("{:#?}", Entry)
+ );
+ }
+
+ #[test]
+ fn test_nested() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map()
+ .entry(&"bar", &true)
+ .entry(&10, &format_args!("{}/{}", 10, 20))
+ .finish()
+ }
+ }
+
+ struct Bar;
+
+ impl fmt::Debug for Bar {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entry(&"foo", &Foo).entry(&Foo, &"world").finish()
+ }
+ }
+
+ assert_eq!(
+ "{\"foo\": {\"bar\": true, 10: 10/20}, \
+ {\"bar\": true, 10: 10/20}: \"world\"}",
+ format!("{:?}", Bar)
+ );
+ assert_eq!(
+ "{
+ \"foo\": {
+ \"bar\": true,
+ 10: 10/20,
+ },
+ {
+ \"bar\": true,
+ 10: 10/20,
+ }: \"world\",
+}",
+ format!("{:#?}", Bar)
+ );
+ }
+
+ #[test]
+ fn test_entry_err() {
+ // Ensure errors in a map entry don't trigger panics (#65231)
+ use std::fmt::Write;
+
+ struct ErrorFmt;
+
+ impl fmt::Debug for ErrorFmt {
+ fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Err(fmt::Error)
+ }
+ }
+
+ struct KeyValue<K, V>(usize, K, V);
+
+ impl<K, V> fmt::Debug for KeyValue<K, V>
+ where
+ K: fmt::Debug,
+ V: fmt::Debug,
+ {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut map = fmt.debug_map();
+
+ for _ in 0..self.0 {
+ map.entry(&self.1, &self.2);
+ }
+
+ map.finish()
+ }
+ }
+
+ let mut buf = String::new();
+
+ assert!(write!(&mut buf, "{:?}", KeyValue(1, ErrorFmt, "bar")).is_err());
+ assert!(write!(&mut buf, "{:?}", KeyValue(1, "foo", ErrorFmt)).is_err());
+
+ assert!(write!(&mut buf, "{:?}", KeyValue(2, ErrorFmt, "bar")).is_err());
+ assert!(write!(&mut buf, "{:?}", KeyValue(2, "foo", ErrorFmt)).is_err());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_key_when_entry_is_incomplete() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").key(&"invalid").finish()
+ }
+ }
+
+ format!("{:?}", Foo);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_finish_incomplete_entry() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().key(&"bar").finish()
+ }
+ }
+
+ format!("{:?}", Foo);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_invalid_value_before_key() {
+ struct Foo;
+
+ impl fmt::Debug for Foo {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().value(&"invalid").key(&"bar").finish()
+ }
+ }
+
+ format!("{:?}", Foo);
+ }
+}
+
mod debug_set {
    use std::fmt;

    /// An empty set renders as `{}` in both compact and pretty modes.
    #[test]
    fn test_empty() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_set().finish()
            }
        }

        assert_eq!("{}", format!("{:?}", Foo));
        assert_eq!("{}", format!("{:#?}", Foo));
    }

    /// A single entry gets its own 4-space-indented line (with a trailing
    /// comma) in pretty mode.
    #[test]
    fn test_single() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_set().entry(&true).finish()
            }
        }

        assert_eq!("{true}", format!("{:?}", Foo));
        assert_eq!(
            "{
    true,
}",
            format!("{:#?}", Foo)
        );
    }

    /// Entries are comma-separated in compact mode; a `format_args!` entry
    /// is embedded verbatim (no quoting).
    #[test]
    fn test_multiple() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_set().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
            }
        }

        assert_eq!("{true, 10/20}", format!("{:?}", Foo));
        assert_eq!(
            "{
    true,
    10/20,
}",
            format!("{:#?}", Foo)
        );
    }

    /// A set nested inside another set indents one extra level per depth in
    /// pretty mode.
    #[test]
    fn test_nested() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_set().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
            }
        }

        struct Bar;

        impl fmt::Debug for Bar {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_set().entry(&Foo).entry(&"world").finish()
            }
        }

        assert_eq!("{{true, 10/20}, \"world\"}", format!("{:?}", Bar));
        assert_eq!(
            "{
    {
        true,
        10/20,
    },
    \"world\",
}",
            format!("{:#?}", Bar)
        );
    }
}
+
mod debug_list {
    use std::fmt;

    /// An empty list renders as `[]` in both compact and pretty modes.
    #[test]
    fn test_empty() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_list().finish()
            }
        }

        assert_eq!("[]", format!("{:?}", Foo));
        assert_eq!("[]", format!("{:#?}", Foo));
    }

    /// A single entry gets its own 4-space-indented line (with a trailing
    /// comma) in pretty mode.
    #[test]
    fn test_single() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_list().entry(&true).finish()
            }
        }

        assert_eq!("[true]", format!("{:?}", Foo));
        assert_eq!(
            "[
    true,
]",
            format!("{:#?}", Foo)
        );
    }

    /// Entries are comma-separated in compact mode; a `format_args!` entry
    /// is embedded verbatim (no quoting).
    #[test]
    fn test_multiple() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_list().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
            }
        }

        assert_eq!("[true, 10/20]", format!("{:?}", Foo));
        assert_eq!(
            "[
    true,
    10/20,
]",
            format!("{:#?}", Foo)
        );
    }

    /// A list nested inside another list indents one extra level per depth
    /// in pretty mode.
    #[test]
    fn test_nested() {
        struct Foo;

        impl fmt::Debug for Foo {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_list().entry(&true).entry(&format_args!("{}/{}", 10, 20)).finish()
            }
        }

        struct Bar;

        impl fmt::Debug for Bar {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                fmt.debug_list().entry(&Foo).entry(&"world").finish()
            }
        }

        assert_eq!("[[true, 10/20], \"world\"]", format!("{:?}", Bar));
        assert_eq!(
            "[
    [
        true,
        10/20,
    ],
    \"world\",
]",
            format!("{:#?}", Bar)
        );
    }
}
+
/// Width/fill flags given to `{:?}` must be forwarded to each element of
/// derived and builder-based Debug output (structs, tuples, slices, maps,
/// sets) in both compact and pretty modes.
#[test]
fn test_formatting_parameters_are_forwarded() {
    use std::collections::{BTreeMap, BTreeSet};
    #[derive(Debug)]
    struct Foo {
        bar: u32,
        baz: u32,
    }
    // BTree collections give deterministic iteration order, so the expected
    // strings are stable.
    let struct_ = Foo { bar: 1024, baz: 7 };
    let tuple = (1024, 7);
    let list = [1024, 7];
    let mut map = BTreeMap::new();
    map.insert("bar", 1024);
    map.insert("baz", 7);
    let mut set = BTreeSet::new();
    set.insert(1024);
    set.insert(7);

    // `{:03?}` zero-pads each element to width 3; 1024 is already wider, so
    // only the 7 becomes "007".
    assert_eq!(format!("{:03?}", struct_), "Foo { bar: 1024, baz: 007 }");
    assert_eq!(format!("{:03?}", tuple), "(1024, 007)");
    assert_eq!(format!("{:03?}", list), "[1024, 007]");
    assert_eq!(format!("{:03?}", map), r#"{"bar": 1024, "baz": 007}"#);
    assert_eq!(format!("{:03?}", set), "{007, 1024}");
    // The same per-element flags must survive pretty (`{:#03?}`) mode; the
    // surrounding newline + `.trim()` keeps the expected text readable.
    assert_eq!(
        format!("{:#03?}", struct_),
        "
Foo {
    bar: 1024,
    baz: 007,
}
    "
        .trim()
    );
    assert_eq!(
        format!("{:#03?}", tuple),
        "
(
    1024,
    007,
)
    "
        .trim()
    );
    assert_eq!(
        format!("{:#03?}", list),
        "
[
    1024,
    007,
]
    "
        .trim()
    );
    assert_eq!(
        format!("{:#03?}", map),
        r#"
{
    "bar": 1024,
    "baz": 007,
}
    "#
        .trim()
    );
    assert_eq!(
        format!("{:#03?}", set),
        "
{
    007,
    1024,
}
    "
        .trim()
    );
}
--- /dev/null
+#[test]
+fn test_format_f64() {
+ assert_eq!("1", format!("{:.0}", 1.0f64));
+ assert_eq!("9", format!("{:.0}", 9.4f64));
+ assert_eq!("10", format!("{:.0}", 9.9f64));
+ assert_eq!("9.8", format!("{:.1}", 9.849f64));
+ assert_eq!("9.9", format!("{:.1}", 9.851f64));
+ assert_eq!("1", format!("{:.0}", 0.5f64));
+ assert_eq!("1.23456789e6", format!("{:e}", 1234567.89f64));
+ assert_eq!("1.23456789e3", format!("{:e}", 1234.56789f64));
+ assert_eq!("1.23456789E6", format!("{:E}", 1234567.89f64));
+ assert_eq!("1.23456789E3", format!("{:E}", 1234.56789f64));
+ assert_eq!("0.0", format!("{:?}", 0.0f64));
+ assert_eq!("1.01", format!("{:?}", 1.01f64));
+}
+
+#[test]
+fn test_format_f32() {
+ assert_eq!("1", format!("{:.0}", 1.0f32));
+ assert_eq!("9", format!("{:.0}", 9.4f32));
+ assert_eq!("10", format!("{:.0}", 9.9f32));
+ assert_eq!("9.8", format!("{:.1}", 9.849f32));
+ assert_eq!("9.9", format!("{:.1}", 9.851f32));
+ assert_eq!("1", format!("{:.0}", 0.5f32));
+ assert_eq!("1.2345679e6", format!("{:e}", 1234567.89f32));
+ assert_eq!("1.2345679e3", format!("{:e}", 1234.56789f32));
+ assert_eq!("1.2345679E6", format!("{:E}", 1234567.89f32));
+ assert_eq!("1.2345679E3", format!("{:E}", 1234.56789f32));
+ assert_eq!("0.0", format!("{:?}", 0.0f32));
+ assert_eq!("1.01", format!("{:?}", 1.01f32));
+}
--- /dev/null
+mod builders;
+mod float;
+mod num;
+
/// Flags set while formatting one argument must not leak into the next
/// argument of the same format string.
#[test]
fn test_format_flags() {
    // An explicit space fill with right alignment.
    assert_eq!(format!("{: >3}", 'a'), "  a");

    // Pointer formatting configures alternate/zero-pad flags internally;
    // they must be reset before the following `{:x}` argument is rendered.
    let ptr = "".as_ptr();
    assert_eq!(format!("{:p} {:x}", ptr, 16), format!("{:p} 10", ptr));
}
+
+#[test]
+fn test_pointer_formats_data_pointer() {
+ let b: &[u8] = b"";
+ let s: &str = "";
+ assert_eq!(format!("{:p}", s), format!("{:p}", s.as_ptr()));
+ assert_eq!(format!("{:p}", b), format!("{:p}", b.as_ptr()));
+}
+
/// Sanity-checks the heuristic `format_args!` uses to pre-size the output
/// buffer (unstable `estimated_capacity`).
#[test]
fn test_estimated_capacity() {
    // Empty format string / empty argument estimate zero.
    assert_eq!(format_args!("").estimated_capacity(), 0);
    assert_eq!(format_args!("{}", "").estimated_capacity(), 0);
    // Literal-only input estimates exactly its byte length.
    assert_eq!(format_args!("Hello").estimated_capacity(), 5);
    // Literal text mixed with arguments is rounded upward — presumably to
    // leave room for the arguments; exact factors are an implementation
    // detail of the heuristic.
    assert_eq!(format_args!("Hello, {}!", "").estimated_capacity(), 16);
    // A format string that *starts* with an argument estimates zero.
    assert_eq!(format_args!("{}, hello!", "World").estimated_capacity(), 0);
    assert_eq!(format_args!("{}. 16-bytes piece", "World").estimated_capacity(), 32);
}
+
/// `pad_integral` must restore the formatter's padding state, so writes
/// performed after it still honor the caller's original `{:<03}` spec.
#[test]
fn pad_integral_resets() {
    struct Bar;

    impl core::fmt::Display for Bar {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            "1".fmt(f)?;
            // Zero-pads "5" to the requested width 3 -> "005".
            f.pad_integral(true, "", "5")?;
            "1".fmt(f)
        }
    }

    // Each of the three writes is padded independently to width 3:
    // "1" -> "1  " (string padding ignores the 0 flag), "5" -> "005",
    // then "1" -> "1  " again — proving the flags were reset in between.
    assert_eq!(format!("{:<03}", Bar), "1  0051  ");
}
--- /dev/null
/// Formatting integers should select the right implementation based off the
/// type of the argument. Also, hex/octal/binary should be defined for
/// integers, but they shouldn't emit the negative sign.
#[test]
fn test_format_int() {
    // Every trait (Display/Debug/Binary/Hex/Octal/Exp) for every signed width.
    macro_rules! check_signed {
        ($($t:ty),*) => {$(
            assert_eq!(format!("{}", 1 as $t), "1");
            assert_eq!(format!("{}", -1 as $t), "-1");
            assert_eq!(format!("{:?}", 1 as $t), "1");
            assert_eq!(format!("{:b}", 1 as $t), "1");
            assert_eq!(format!("{:x}", 1 as $t), "1");
            assert_eq!(format!("{:X}", 1 as $t), "1");
            assert_eq!(format!("{:o}", 1 as $t), "1");
            assert_eq!(format!("{:e}", 1 as $t), "1e0");
            assert_eq!(format!("{:E}", 1 as $t), "1E0");
        )*};
    }
    // Radix traits for every unsigned width (no sign to worry about).
    macro_rules! check_unsigned {
        ($($t:ty),*) => {$(
            assert_eq!(format!("{}", 1 as $t), "1");
            assert_eq!(format!("{:?}", 1 as $t), "1");
            assert_eq!(format!("{:b}", 1 as $t), "1");
            assert_eq!(format!("{:x}", 1 as $t), "1");
            assert_eq!(format!("{:X}", 1 as $t), "1");
            assert_eq!(format!("{:o}", 1 as $t), "1");
        )*};
    }
    macro_rules! check_unsigned_exp {
        ($($t:ty),*) => {$(
            assert_eq!(format!("{:e}", 1 as $t), "1e0");
            assert_eq!(format!("{:E}", 1 as $t), "1E0");
        )*};
    }

    check_signed!(isize, i8, i16, i32, i64);
    check_unsigned!(usize, u8, u16, u32, u64);
    // LowerExp/UpperExp coverage for the fixed-width unsigned types
    // (matching the original test, which did not include usize here).
    check_unsigned_exp!(u8, u16, u32, u64);

    // Test a larger number.
    assert_eq!(format!("{:b}", 55), "110111");
    assert_eq!(format!("{:o}", 55), "67");
    assert_eq!(format!("{}", 55), "55");
    assert_eq!(format!("{:x}", 55), "37");
    assert_eq!(format!("{:X}", 55), "37");
    assert_eq!(format!("{:e}", 55), "5.5e1");
    assert_eq!(format!("{:E}", 55), "5.5E1");
    // Trailing zeros are dropped from the exponential mantissa.
    assert_eq!(format!("{:e}", 10000000000u64), "1e10");
    assert_eq!(format!("{:E}", 10000000000u64), "1E10");
    assert_eq!(format!("{:e}", 10000000001u64), "1.0000000001e10");
    assert_eq!(format!("{:E}", 10000000001u64), "1.0000000001E10");
}
+
/// `{:e}` must be exact (digit for digit, no float rounding) at the extreme
/// values of every signed and unsigned integer width.
#[test]
fn test_format_int_exp_limits() {
    let cases: &[(String, &str)] = &[
        (format!("{:e}", i8::MIN), "-1.28e2"),
        (format!("{:e}", i8::MAX), "1.27e2"),
        (format!("{:e}", i16::MIN), "-3.2768e4"),
        (format!("{:e}", i16::MAX), "3.2767e4"),
        (format!("{:e}", i32::MIN), "-2.147483648e9"),
        (format!("{:e}", i32::MAX), "2.147483647e9"),
        (format!("{:e}", i64::MIN), "-9.223372036854775808e18"),
        (format!("{:e}", i64::MAX), "9.223372036854775807e18"),
        (format!("{:e}", i128::MIN), "-1.70141183460469231731687303715884105728e38"),
        (format!("{:e}", i128::MAX), "1.70141183460469231731687303715884105727e38"),
        (format!("{:e}", u8::MAX), "2.55e2"),
        (format!("{:e}", u16::MAX), "6.5535e4"),
        (format!("{:e}", u32::MAX), "4.294967295e9"),
        (format!("{:e}", u64::MAX), "1.8446744073709551615e19"),
        (format!("{:e}", u128::MAX), "3.40282366920938463463374607431768211455e38"),
    ];
    for (formatted, expected) in cases {
        assert_eq!(formatted, expected);
    }
}
+
/// Integer `{:.Ne}` formatting: agreement with floats, zero-padding of the
/// mantissa, rounding when truncating, huge and zero precisions, and the
/// interaction of width/sign with precision.
#[test]
fn test_format_int_exp_precision() {
    // Float and integer exponential formatting must agree when the value is
    // exactly representable in f64.
    let big_int: u32 = 314_159_265;
    assert_eq!(format!("{:.1e}", big_int), format!("{:.1e}", f64::from(big_int)));

    // Explicit precision pads the mantissa with zeros.
    assert_eq!(format!("{:.10e}", i8::MIN), "-1.2800000000e2");
    assert_eq!(format!("{:.10e}", i16::MIN), "-3.2768000000e4");
    assert_eq!(format!("{:.10e}", i32::MIN), "-2.1474836480e9");
    assert_eq!(format!("{:.20e}", i64::MIN), "-9.22337203685477580800e18");
    assert_eq!(format!("{:.40e}", i128::MIN), "-1.7014118346046923173168730371588410572800e38");

    // Truncating the mantissa rounds the last kept digit.
    assert_eq!(format!("{:.1e}", i8::MIN), "-1.3e2");
    assert_eq!(format!("{:.1e}", i16::MIN), "-3.3e4");
    assert_eq!(format!("{:.1e}", i32::MIN), "-2.1e9");
    assert_eq!(format!("{:.1e}", i64::MIN), "-9.2e18");
    assert_eq!(format!("{:.1e}", i128::MIN), "-1.7e38");

    // A huge precision simply appends that many zeros.
    assert_eq!(format!("{:.1000e}", 1), format!("1.{}e0", "0".repeat(1000)));
    // Zero precision drops the fractional part entirely.
    // (Was `format!("1e0",)` — a pointless allocation with a stray trailing
    // comma; a plain literal compares identically.)
    assert_eq!(format!("{:.0e}", 1), "1e0");

    // Width padding counts the forced sign: "+1.000e0" is 8 chars, padded
    // with two leading spaces to width 10.
    assert_eq!(format!("{:+10.3e}", 1), "  +1.000e0");
}
+
/// Zero must format as "0" (and "0e0"/"0E0" exponentially) for both the
/// default integer type and an explicit unsigned type.
#[test]
fn test_format_int_zero() {
    macro_rules! check_zero {
        ($val:expr) => {
            assert_eq!(format!("{}", $val), "0");
            assert_eq!(format!("{:?}", $val), "0");
            assert_eq!(format!("{:b}", $val), "0");
            assert_eq!(format!("{:o}", $val), "0");
            assert_eq!(format!("{:x}", $val), "0");
            assert_eq!(format!("{:X}", $val), "0");
            assert_eq!(format!("{:e}", $val), "0e0");
            assert_eq!(format!("{:E}", $val), "0E0");
        };
    }

    check_zero!(0); // defaults to i32
    check_zero!(0u32);
}
+
/// Width, alignment, zero-padding, and the `#` radix-prefix flag for
/// integers across all radices.
#[test]
fn test_format_int_flags() {
    // Width and alignment.
    assert_eq!(format!("{:3}", 1), "  1");
    assert_eq!(format!("{:>3}", 1), "  1");
    assert_eq!(format!("{:>+3}", 1), " +1");
    assert_eq!(format!("{:<3}", 1), "1  ");
    // `#` alone is a no-op for decimal.
    assert_eq!(format!("{:#}", 1), "1");
    // `#` adds the radix prefix; padding goes outside the prefix.
    assert_eq!(format!("{:#x}", 10), "0xa");
    assert_eq!(format!("{:#X}", 10), "0xA");
    assert_eq!(format!("{:#5x}", 10), "  0xa");
    assert_eq!(format!("{:#o}", 10), "0o12");
    // Zero-padding vs space-padding, both alignments, for hex.
    assert_eq!(format!("{:08x}", 10), "0000000a");
    assert_eq!(format!("{:8x}", 10), "       a");
    assert_eq!(format!("{:<8x}", 10), "a       ");
    assert_eq!(format!("{:>8x}", 10), "       a");
    // With `#0`, the prefix counts against the width and zeros go after it.
    assert_eq!(format!("{:#08x}", 10), "0x00000a");
    // The sign consumes one column of zero-padding.
    assert_eq!(format!("{:08}", -10), "-0000010");
    // An all-ones byte in every radix, with and without the `#` prefix.
    assert_eq!(format!("{:x}", !0u8), "ff");
    assert_eq!(format!("{:X}", !0u8), "FF");
    assert_eq!(format!("{:b}", !0u8), "11111111");
    assert_eq!(format!("{:o}", !0u8), "377");
    assert_eq!(format!("{:#x}", !0u8), "0xff");
    assert_eq!(format!("{:#X}", !0u8), "0xFF");
    assert_eq!(format!("{:#b}", !0u8), "0b11111111");
    assert_eq!(format!("{:#o}", !0u8), "0o377");
}
+
/// `+` forces a sign and `0` zero-pads *between* the sign and the digits,
/// whereas space padding goes before the sign.
#[test]
fn test_format_int_sign_padding() {
    for (formatted, expected) in [
        (format!("{:+5}", 1), "   +1"),
        (format!("{:+5}", -1), "   -1"),
        (format!("{:05}", 1), "00001"),
        (format!("{:05}", -1), "-0001"),
        (format!("{:+05}", 1), "+0001"),
        (format!("{:+05}", -1), "-0001"),
    ] {
        assert_eq!(formatted, expected);
    }
}
+
/// `Display` for each signed type's MIN must be exact — the magnitude of
/// MIN is not representable as a positive value of the same type, so naive
/// negation would overflow. Uses the idiomatic `to_string()` instead of
/// `format!("{}", ..)`, and extends coverage to i128.
#[test]
fn test_format_int_twos_complement() {
    assert_eq!(i8::MIN.to_string(), "-128");
    assert_eq!(i16::MIN.to_string(), "-32768");
    assert_eq!(i32::MIN.to_string(), "-2147483648");
    assert_eq!(i64::MIN.to_string(), "-9223372036854775808");
    assert_eq!(i128::MIN.to_string(), "-170141183460469231731687303715884105728");
}
+
/// `{:x?}`/`{:X?}` debug-hex applies the numeric flags (here zero-pad to
/// width 2) to every element of a byte slice.
#[test]
fn test_format_debug_hex() {
    let bytes = b"Foo\0";
    assert_eq!(format!("{:02x?}", bytes), "[46, 6f, 6f, 00]");
    assert_eq!(format!("{:02X?}", bytes), "[46, 6F, 6F, 00]");
}
--- /dev/null
+mod sip;
+
+use std::default::Default;
+use std::hash::{Hash, Hasher};
+use std::rc::Rc;
+
/// A trivial test hasher: its state is just the running sum of every byte
/// fed into it, so expected hash values can be computed by hand.
struct MyHasher {
    hash: u64,
}

impl Default for MyHasher {
    fn default() -> MyHasher {
        MyHasher { hash: 0 }
    }
}

impl Hasher for MyHasher {
    fn write(&mut self, buf: &[u8]) {
        // Accumulate the sum of all bytes written so far.
        self.hash += buf.iter().map(|&byte| u64::from(byte)).sum::<u64>();
    }

    fn finish(&self) -> u64 {
        self.hash
    }
}
+
/// Drives the `Hash` impls for primitives, strings, slices, and raw
/// pointers through the byte-summing `MyHasher`, so the exact bytes each
/// impl writes can be predicted arithmetically.
#[test]
fn test_writer_hasher() {
    fn hash<T: Hash>(t: &T) -> u64 {
        let mut s = MyHasher { hash: 0 };
        t.hash(&mut s);
        s.finish()
    }

    // The unit type writes nothing.
    assert_eq!(hash(&()), 0);

    // Small integers hash as their own bytes: the sum is the value itself.
    assert_eq!(hash(&5_u8), 5);
    assert_eq!(hash(&5_u16), 5);
    assert_eq!(hash(&5_u32), 5);
    assert_eq!(hash(&5_u64), 5);
    assert_eq!(hash(&5_usize), 5);

    assert_eq!(hash(&5_i8), 5);
    assert_eq!(hash(&5_i16), 5);
    assert_eq!(hash(&5_i32), 5);
    assert_eq!(hash(&5_i64), 5);
    assert_eq!(hash(&5_isize), 5);

    assert_eq!(hash(&false), 0);
    assert_eq!(hash(&true), 1);

    // A char hashes as its scalar value ('a' == 97).
    assert_eq!(hash(&'a'), 97);

    // str hashing writes the UTF-8 bytes plus a trailing 0xFF marker byte,
    // regardless of the owning container.
    let s: &str = "a";
    assert_eq!(hash(&s), 97 + 0xFF);
    let s: Box<str> = String::from("a").into_boxed_str();
    assert_eq!(hash(&s), 97 + 0xFF);
    let s: Rc<&str> = Rc::new("a");
    assert_eq!(hash(&s), 97 + 0xFF);
    // Slice hashing includes the length (3) in addition to the elements
    // (1 + 2 + 3), giving 9 — again independent of the container.
    let cs: &[u8] = &[1, 2, 3];
    assert_eq!(hash(&cs), 9);
    let cs: Box<[u8]> = Box::new([1, 2, 3]);
    assert_eq!(hash(&cs), 9);
    let cs: Rc<[u8]> = Rc::new([1, 2, 3]);
    assert_eq!(hash(&cs), 9);

    // Thin raw pointers hash as their address.
    let ptr = 5_usize as *const i32;
    assert_eq!(hash(&ptr), 5);

    let ptr = 5_usize as *mut i32;
    assert_eq!(hash(&ptr), 5);

    if cfg!(miri) {
        // Miri cannot hash pointers
        return;
    }

    // Wide (slice) raw pointers hash address plus length metadata.
    let cs: &mut [u8] = &mut [1, 2, 3];
    let ptr = cs.as_ptr();
    let slice_ptr = cs as *const [u8];
    assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64);

    let slice_ptr = cs as *mut [u8];
    assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64);
}
+
/// A value whose `Hash` impl feeds a single `u64` to the hasher via the
/// specialized `write_u64` method.
struct Custom {
    hash: u64,
}

impl Hash for Custom {
    fn hash<H: Hasher>(&self, state: &mut H) {
        state.write_u64(self.hash);
    }
}

/// A hasher that only accepts `write_u64`: any byte-level `write` panics,
/// proving the specialized method (not the default byte fallback) was used.
struct CustomHasher {
    output: u64,
}

impl Default for CustomHasher {
    fn default() -> CustomHasher {
        CustomHasher { output: 0 }
    }
}

impl Hasher for CustomHasher {
    fn finish(&self) -> u64 {
        self.output
    }

    fn write(&mut self, _: &[u8]) {
        panic!()
    }

    fn write_u64(&mut self, data: u64) {
        self.output = data;
    }
}
+
/// A `Hash` impl that calls `write_u64` directly must reach the hasher's
/// specialized method rather than the byte-oriented `write` fallback
/// (which `CustomHasher` turns into a panic).
#[test]
fn test_custom_state() {
    fn hash_one<T: Hash>(value: &T) -> u64 {
        let mut hasher = CustomHasher { output: 0 };
        value.hash(&mut hasher);
        hasher.finish()
    }

    assert_eq!(hash_one(&Custom { hash: 5 }), 5);
}
+
// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten.
// See https://github.com/kripken/emscripten-fastcomp/issues/169
#[cfg(not(target_os = "emscripten"))]
#[test]
fn test_indirect_hasher() {
    // Hashing through a `&mut dyn Hasher` trait object must forward to the
    // underlying concrete hasher.
    let mut concrete = MyHasher { hash: 0 };
    {
        let mut erased: &mut dyn Hasher = &mut concrete;
        5u32.hash(&mut erased);
    }
    assert_eq!(concrete.hash, 5);
}
--- /dev/null
+#![allow(deprecated)]
+
+use core::hash::{Hash, Hasher};
+use core::hash::{SipHasher, SipHasher13};
+use core::{mem, slice};
+
/// Hashes just the raw bytes of the wrapped slice, without the length
/// prefix that `Hash for [u8]` would add.
struct Bytes<'a>(&'a [u8]);

impl<'a> Hash for Bytes<'a> {
    #[allow(unused_must_use)]
    fn hash<H: Hasher>(&self, state: &mut H) {
        state.write(self.0);
    }
}
+
/// Assembles a `u64` from little-endian bytes of `$buf` starting at `$i`.
///
/// Two forms:
/// * `(buf, i)` — reads exactly 8 bytes;
/// * `(buf, i, len)` — reads `len` bytes, leaving the high bytes zero.
macro_rules! u8to64_le {
    ($buf:expr, $i:expr) => {
        $buf[0 + $i] as u64
            | ($buf[1 + $i] as u64) << 8
            | ($buf[2 + $i] as u64) << 16
            | ($buf[3 + $i] as u64) << 24
            | ($buf[4 + $i] as u64) << 32
            | ($buf[5 + $i] as u64) << 40
            | ($buf[6 + $i] as u64) << 48
            | ($buf[7 + $i] as u64) << 56
    };
    ($buf:expr, $i:expr, $len:expr) => {{
        let mut t = 0;
        let mut out = 0;
        // Byte `t` lands at bit offset `t * 8` (little-endian).
        while t < $len {
            out |= ($buf[t + $i] as u64) << t * 8;
            t += 1;
        }
        out
    }};
}
+
/// Hashes `value` into the provided hasher and returns the final digest.
fn hash_with<H: Hasher, T: Hash>(mut hasher: H, value: &T) -> u64 {
    value.hash(&mut hasher);
    hasher.finish()
}

/// Hashes `value` with a default-keyed (zero-key) `SipHasher`.
fn hash<T: Hash>(value: &T) -> u64 {
    hash_with(SipHasher::new(), value)
}
+
/// SipHash-1-3 test vectors: `vecs[t]` is the little-endian digest of the
/// byte string `[0, 1, ..., t-1]` under the key bytes `0x00..=0x0f`.
///
/// For every length `t` this checks that (a) the one-shot hash matches the
/// vector and (b) feeding the buffer one byte at a time (`state_inc`) gives
/// the same result as hashing the whole buffer at once.
#[test]
#[allow(unused_must_use)]
fn test_siphash_1_3() {
    let vecs: [[u8; 8]; 64] = [
        [0xdc, 0xc4, 0x0f, 0x05, 0x58, 0x01, 0xac, 0xab],
        [0x93, 0xca, 0x57, 0x7d, 0xf3, 0x9b, 0xf4, 0xc9],
        [0x4d, 0xd4, 0xc7, 0x4d, 0x02, 0x9b, 0xcb, 0x82],
        [0xfb, 0xf7, 0xdd, 0xe7, 0xb8, 0x0a, 0xf8, 0x8b],
        [0x28, 0x83, 0xd3, 0x88, 0x60, 0x57, 0x75, 0xcf],
        [0x67, 0x3b, 0x53, 0x49, 0x2f, 0xd5, 0xf9, 0xde],
        [0xa7, 0x22, 0x9f, 0xc5, 0x50, 0x2b, 0x0d, 0xc5],
        [0x40, 0x11, 0xb1, 0x9b, 0x98, 0x7d, 0x92, 0xd3],
        [0x8e, 0x9a, 0x29, 0x8d, 0x11, 0x95, 0x90, 0x36],
        [0xe4, 0x3d, 0x06, 0x6c, 0xb3, 0x8e, 0xa4, 0x25],
        [0x7f, 0x09, 0xff, 0x92, 0xee, 0x85, 0xde, 0x79],
        [0x52, 0xc3, 0x4d, 0xf9, 0xc1, 0x18, 0xc1, 0x70],
        [0xa2, 0xd9, 0xb4, 0x57, 0xb1, 0x84, 0xa3, 0x78],
        [0xa7, 0xff, 0x29, 0x12, 0x0c, 0x76, 0x6f, 0x30],
        [0x34, 0x5d, 0xf9, 0xc0, 0x11, 0xa1, 0x5a, 0x60],
        [0x56, 0x99, 0x51, 0x2a, 0x6d, 0xd8, 0x20, 0xd3],
        [0x66, 0x8b, 0x90, 0x7d, 0x1a, 0xdd, 0x4f, 0xcc],
        [0x0c, 0xd8, 0xdb, 0x63, 0x90, 0x68, 0xf2, 0x9c],
        [0x3e, 0xe6, 0x73, 0xb4, 0x9c, 0x38, 0xfc, 0x8f],
        [0x1c, 0x7d, 0x29, 0x8d, 0xe5, 0x9d, 0x1f, 0xf2],
        [0x40, 0xe0, 0xcc, 0xa6, 0x46, 0x2f, 0xdc, 0xc0],
        [0x44, 0xf8, 0x45, 0x2b, 0xfe, 0xab, 0x92, 0xb9],
        [0x2e, 0x87, 0x20, 0xa3, 0x9b, 0x7b, 0xfe, 0x7f],
        [0x23, 0xc1, 0xe6, 0xda, 0x7f, 0x0e, 0x5a, 0x52],
        [0x8c, 0x9c, 0x34, 0x67, 0xb2, 0xae, 0x64, 0xf4],
        [0x79, 0x09, 0x5b, 0x70, 0x28, 0x59, 0xcd, 0x45],
        [0xa5, 0x13, 0x99, 0xca, 0xe3, 0x35, 0x3e, 0x3a],
        [0x35, 0x3b, 0xde, 0x4a, 0x4e, 0xc7, 0x1d, 0xa9],
        [0x0d, 0xd0, 0x6c, 0xef, 0x02, 0xed, 0x0b, 0xfb],
        [0xf4, 0xe1, 0xb1, 0x4a, 0xb4, 0x3c, 0xd9, 0x88],
        [0x63, 0xe6, 0xc5, 0x43, 0xd6, 0x11, 0x0f, 0x54],
        [0xbc, 0xd1, 0x21, 0x8c, 0x1f, 0xdd, 0x70, 0x23],
        [0x0d, 0xb6, 0xa7, 0x16, 0x6c, 0x7b, 0x15, 0x81],
        [0xbf, 0xf9, 0x8f, 0x7a, 0xe5, 0xb9, 0x54, 0x4d],
        [0x3e, 0x75, 0x2a, 0x1f, 0x78, 0x12, 0x9f, 0x75],
        [0x91, 0x6b, 0x18, 0xbf, 0xbe, 0xa3, 0xa1, 0xce],
        [0x06, 0x62, 0xa2, 0xad, 0xd3, 0x08, 0xf5, 0x2c],
        [0x57, 0x30, 0xc3, 0xa3, 0x2d, 0x1c, 0x10, 0xb6],
        [0xa1, 0x36, 0x3a, 0xae, 0x96, 0x74, 0xf4, 0xb3],
        [0x92, 0x83, 0x10, 0x7b, 0x54, 0x57, 0x6b, 0x62],
        [0x31, 0x15, 0xe4, 0x99, 0x32, 0x36, 0xd2, 0xc1],
        [0x44, 0xd9, 0x1a, 0x3f, 0x92, 0xc1, 0x7c, 0x66],
        [0x25, 0x88, 0x13, 0xc8, 0xfe, 0x4f, 0x70, 0x65],
        [0xa6, 0x49, 0x89, 0xc2, 0xd1, 0x80, 0xf2, 0x24],
        [0x6b, 0x87, 0xf8, 0xfa, 0xed, 0x1c, 0xca, 0xc2],
        [0x96, 0x21, 0x04, 0x9f, 0xfc, 0x4b, 0x16, 0xc2],
        [0x23, 0xd6, 0xb1, 0x68, 0x93, 0x9c, 0x6e, 0xa1],
        [0xfd, 0x14, 0x51, 0x8b, 0x9c, 0x16, 0xfb, 0x49],
        [0x46, 0x4c, 0x07, 0xdf, 0xf8, 0x43, 0x31, 0x9f],
        [0xb3, 0x86, 0xcc, 0x12, 0x24, 0xaf, 0xfd, 0xc6],
        [0x8f, 0x09, 0x52, 0x0a, 0xd1, 0x49, 0xaf, 0x7e],
        [0x9a, 0x2f, 0x29, 0x9d, 0x55, 0x13, 0xf3, 0x1c],
        [0x12, 0x1f, 0xf4, 0xa2, 0xdd, 0x30, 0x4a, 0xc4],
        [0xd0, 0x1e, 0xa7, 0x43, 0x89, 0xe9, 0xfa, 0x36],
        [0xe6, 0xbc, 0xf0, 0x73, 0x4c, 0xb3, 0x8f, 0x31],
        [0x80, 0xe9, 0xa7, 0x70, 0x36, 0xbf, 0x7a, 0xa2],
        [0x75, 0x6d, 0x3c, 0x24, 0xdb, 0xc0, 0xbc, 0xb4],
        [0x13, 0x15, 0xb7, 0xfd, 0x52, 0xd8, 0xf8, 0x23],
        [0x08, 0x8a, 0x7d, 0xa6, 0x4d, 0x5f, 0x03, 0x8f],
        [0x48, 0xf1, 0xe8, 0xb7, 0xe5, 0xd0, 0x9c, 0xd8],
        [0xee, 0x44, 0xa6, 0xf7, 0xbc, 0xe6, 0xf4, 0xf6],
        [0xf2, 0x37, 0x18, 0x0f, 0xd8, 0x9a, 0xc5, 0xae],
        [0xe0, 0x94, 0x66, 0x4b, 0x15, 0xf6, 0xb2, 0xc3],
        [0xa8, 0xb3, 0xbb, 0xb7, 0x62, 0x90, 0x19, 0x9d],
    ];

    // Key bytes 0x00..=0x0f, split into two little-endian u64 halves.
    let k0 = 0x_07_06_05_04_03_02_01_00;
    let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
    let mut buf = Vec::new();
    let mut t = 0;
    let mut state_inc = SipHasher13::new_with_keys(k0, k1);

    while t < 64 {
        // Decode the expected digest for the current input length.
        let vec = u8to64_le!(vecs[t], 0);
        let out = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
        assert_eq!(vec, out);

        // One-shot and incremental hashing must agree with each other and
        // with the reference vector.
        let full = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf));
        let i = state_inc.finish();

        assert_eq!(full, i);
        assert_eq!(full, vec);

        // Grow the input by one byte; the incremental state sees it too.
        buf.push(t as u8);
        Hasher::write(&mut state_inc, &[t as u8]);

        t += 1;
    }
}
+
/// SipHash-2-4 test vectors: same protocol as `test_siphash_1_3` — for each
/// input length, the one-shot digest and the byte-at-a-time incremental
/// digest must both match the reference vector.
#[test]
#[allow(unused_must_use)]
fn test_siphash_2_4() {
    let vecs: [[u8; 8]; 64] = [
        [0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72],
        [0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74],
        [0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d],
        [0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85],
        [0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf],
        [0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18],
        [0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb],
        [0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab],
        [0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93],
        [0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e],
        [0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a],
        [0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4],
        [0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75],
        [0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14],
        [0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7],
        [0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1],
        [0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f],
        [0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69],
        [0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b],
        [0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb],
        [0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe],
        [0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0],
        [0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93],
        [0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8],
        [0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8],
        [0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc],
        [0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17],
        [0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f],
        [0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde],
        [0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6],
        [0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad],
        [0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32],
        [0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71],
        [0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7],
        [0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12],
        [0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15],
        [0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31],
        [0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02],
        [0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca],
        [0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a],
        [0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e],
        [0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad],
        [0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18],
        [0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4],
        [0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9],
        [0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9],
        [0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb],
        [0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0],
        [0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6],
        [0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7],
        [0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee],
        [0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1],
        [0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a],
        [0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81],
        [0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f],
        [0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24],
        [0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7],
        [0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea],
        [0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60],
        [0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66],
        [0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c],
        [0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f],
        [0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5],
        [0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95],
    ];

    // Key bytes 0x00..=0x0f, split into two little-endian u64 halves.
    let k0 = 0x_07_06_05_04_03_02_01_00;
    let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
    let mut buf = Vec::new();
    let mut t = 0;
    let mut state_inc = SipHasher::new_with_keys(k0, k1);

    while t < 64 {
        // Decode the expected digest for the current input length.
        let vec = u8to64_le!(vecs[t], 0);
        let out = hash_with(SipHasher::new_with_keys(k0, k1), &Bytes(&buf));
        assert_eq!(vec, out);

        // One-shot and incremental hashing must agree with each other and
        // with the reference vector.
        let full = hash_with(SipHasher::new_with_keys(k0, k1), &Bytes(&buf));
        let i = state_inc.finish();

        assert_eq!(full, i);
        assert_eq!(full, vec);

        // Grow the input by one byte; the incremental state sees it too.
        buf.push(t as u8);
        Hasher::write(&mut state_inc, &[t as u8]);

        t += 1;
    }
}
+
/// On 32-bit targets, `usize` must hash exactly like `u32` — and therefore
/// differently from `u64`.
#[test]
#[cfg(target_pointer_width = "32")]
fn test_hash_usize() {
    let val = 0xdeadbeef_deadbeef_u64;
    assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
    assert_ne!(hash(&(val as u64)), hash(&(val as usize)));
}
+
/// On 64-bit targets, `usize` must hash exactly like `u64` — and therefore
/// differently from `u32`.
#[test]
#[cfg(target_pointer_width = "64")]
fn test_hash_usize() {
    let val = 0xdeadbeef_deadbeef_u64;
    assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
    assert_ne!(hash(&(val as u32)), hash(&(val as usize)));
}
+
/// Hashing the same value twice must give the same digest (the hasher is
/// freshly constructed each time, so this checks determinism).
#[test]
fn test_hash_idempotent() {
    let val64 = 0xdeadbeef_deadbeef_u64;
    let val32 = 0xdeadbeef_u32;
    assert_eq!(hash(&val64), hash(&val64));
    assert_eq!(hash(&val32), hash(&val32));
}
+
/// Every byte of a `u64` must influence its hash: zeroing any single byte
/// of the input has to change the digest.
#[test]
fn test_hash_no_bytes_dropped_64() {
    // Clears byte `byte` (0 = least significant) of `val`.
    fn zero_byte(val: u64, byte: usize) -> u64 {
        assert!(byte < 8);
        val & !(0xff << (byte * 8))
    }

    let val = 0xdeadbeef_deadbeef_u64;
    for byte in 0..8 {
        assert_ne!(hash(&val), hash(&zero_byte(val, byte)));
    }
}
+
/// Every byte of a `u32` must influence its hash: zeroing any single byte
/// of the input has to change the digest.
#[test]
fn test_hash_no_bytes_dropped_32() {
    // Clears byte `byte` (0 = least significant) of `val`.
    fn zero_byte(val: u32, byte: usize) -> u32 {
        assert!(byte < 4);
        val & !(0xff << (byte * 8))
    }

    let val = 0xdeadbeef_u32;
    for byte in 0..4 {
        assert_ne!(hash(&val), hash(&zero_byte(val, byte)));
    }
}
+
/// Tuples whose pieces concatenate to the same byte string must still hash
/// differently — field boundaries must be part of the hash input.
#[test]
fn test_hash_no_concat_alias() {
    let split = ("aa", "bb");
    let merged = ("aabb", "");
    let shifted = ("a", "abb");

    assert_ne!(split, merged);
    assert_ne!(merged, shifted);
    assert_ne!(hash(&split), hash(&merged));
    assert_ne!(hash(&split), hash(&shifted));

    // Same idea with adjacent subslices of a single buffer: different
    // partitions of the same bytes must not collide.
    let buf = [1, 0, 0, 0];
    let v = (&buf[..1], &buf[1..3], &buf[3..]);
    let w = (&buf[..], &buf[4..4], &buf[4..4]);

    assert_ne!(v, w);
    assert_ne!(hash(&v), hash(&w));
}
+
/// The digest must depend only on the bytes written, not on how they are
/// chunked across `write*` calls: a `write_usize` + several short writes
/// must equal one equivalent sequence with different chunk boundaries.
#[test]
fn test_write_short_works() {
    let test_usize = 0xd0c0b0a0usize;
    // First hasher: typed writes and single-byte writes.
    let mut h1 = SipHasher::new();
    h1.write_usize(test_usize);
    h1.write(b"bytes");
    h1.write(b"string");
    h1.write_u8(0xFFu8);
    h1.write_u8(0x01u8);
    // Second hasher: the same bytes, but the usize is fed as its raw
    // (native-endian) byte representation and the two u8s as one slice.
    let mut h2 = SipHasher::new();
    // SAFETY: viewing the storage of a live local `usize` as
    // `size_of::<usize>()` bytes is always valid.
    h2.write(unsafe {
        slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::<usize>())
    });
    h2.write(b"bytes");
    h2.write(b"string");
    h2.write(&[0xFFu8, 0x01u8]);
    assert_eq!(h1.finish(), h2.finish());
}
--- /dev/null
+use core::any::TypeId;
+use core::intrinsics::assume;
+
/// `TypeId` is equal for the same sized type and distinct across different
/// sized types.
#[test]
fn test_typeid_sized_types() {
    struct X;
    struct Y(u32);

    assert_eq!(TypeId::of::<X>(), TypeId::of::<X>());
    assert_eq!(TypeId::of::<Y>(), TypeId::of::<Y>());
    assert_ne!(TypeId::of::<X>(), TypeId::of::<Y>());
}
+
+// Same guarantees as the sized-type test, but for types with unsized tails
+// (`str` and a trait object) wrapped in sized structs.
+#[test]
+fn test_typeid_unsized_types() {
+ trait Z {}
+ struct X(str);
+ struct Y(dyn Z + 'static);
+
+ assert_eq!(TypeId::of::<X>(), TypeId::of::<X>());
+ assert_eq!(TypeId::of::<Y>(), TypeId::of::<Y>());
+ assert!(TypeId::of::<X>() != TypeId::of::<Y>());
+}
+
+// Check that `const_assume` feature allow `assume` intrinsic
+// to be used in const contexts.
+#[test]
+fn test_assume_can_be_in_const_contexts() {
+ // `const fn` containing `assume`: merely compiling this is most of the test.
+ const unsafe fn foo(x: usize, y: usize) -> usize {
+ // SAFETY: the entire function is not safe,
+ // but it is just an example not used elsewhere.
+ unsafe { assume(y != 0) };
+ x / y
+ }
+ // 42 / 97 rounds down to 0.
+ let rs = unsafe { foo(42, 97) };
+ assert_eq!(rs, 0);
+}
--- /dev/null
+// ignore-tidy-filelength
+
+use core::cell::Cell;
+use core::convert::TryFrom;
+use core::iter::*;
+
+/// An iterator wrapper that panics whenever `next` or `next_back` is called
+/// after `None` has been returned.
+struct Unfuse<I> {
+ // Wrapped iterator.
+ iter: I,
+ // Set once the wrapped iterator has returned `None`; any further call panics.
+ exhausted: bool,
+}
+
+// Convenience constructor: wraps any `IntoIterator` in an `Unfuse`.
+fn unfuse<I: IntoIterator>(iter: I) -> Unfuse<I::IntoIter> {
+ Unfuse { iter: iter.into_iter(), exhausted: false }
+}
+
+// Forwards `next`, asserting that the iterator is never polled past `None`.
+impl<I> Iterator for Unfuse<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ assert!(!self.exhausted);
+ let next = self.iter.next();
+ self.exhausted = next.is_none();
+ next
+ }
+}
+
+// Same non-fused discipline for the back end of the iterator.
+impl<I> DoubleEndedIterator for Unfuse<I>
+where
+ I: DoubleEndedIterator,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ assert!(!self.exhausted);
+ let next = self.iter.next_back();
+ self.exhausted = next.is_none();
+ next
+ }
+}
+
+// Lexicographic iterator comparisons (`lt`/`le`/`gt`/`ge`), including the
+// empty sequence and partially-ordered (NaN-containing) float sequences.
+#[test]
+fn test_lt() {
+ let empty: [isize; 0] = [];
+ let xs = [1, 2, 3];
+ let ys = [1, 2, 0];
+
+ assert!(!xs.iter().lt(ys.iter()));
+ assert!(!xs.iter().le(ys.iter()));
+ assert!(xs.iter().gt(ys.iter()));
+ assert!(xs.iter().ge(ys.iter()));
+
+ assert!(ys.iter().lt(xs.iter()));
+ assert!(ys.iter().le(xs.iter()));
+ assert!(!ys.iter().gt(xs.iter()));
+ assert!(!ys.iter().ge(xs.iter()));
+
+ // The empty sequence is strictly less than any non-empty one.
+ assert!(empty.iter().lt(xs.iter()));
+ assert!(empty.iter().le(xs.iter()));
+ assert!(!empty.iter().gt(xs.iter()));
+ assert!(!empty.iter().ge(xs.iter()));
+
+ // Sequence with NaN
+ let u = [1.0f64, 2.0];
+ let v = [0.0f64 / 0.0, 3.0];
+
+ // NaN is unordered, so every comparison against it is false.
+ assert!(!u.iter().lt(v.iter()));
+ assert!(!u.iter().le(v.iter()));
+ assert!(!u.iter().gt(v.iter()));
+ assert!(!u.iter().ge(v.iter()));
+
+ let a = [0.0f64 / 0.0];
+ let b = [1.0f64];
+ let c = [2.0f64];
+
+ // Single-element sequences must compare exactly like their elements.
+ assert!(a.iter().lt(b.iter()) == (a[0] < b[0]));
+ assert!(a.iter().le(b.iter()) == (a[0] <= b[0]));
+ assert!(a.iter().gt(b.iter()) == (a[0] > b[0]));
+ assert!(a.iter().ge(b.iter()) == (a[0] >= b[0]));
+
+ assert!(c.iter().lt(b.iter()) == (c[0] < b[0]));
+ assert!(c.iter().le(b.iter()) == (c[0] <= b[0]));
+ assert!(c.iter().gt(b.iter()) == (c[0] > b[0]));
+ assert!(c.iter().ge(b.iter()) == (c[0] >= b[0]));
+}
+
+// Comparisons between two different adaptor pipelines over the same data.
+#[test]
+fn test_multi_iter() {
+ let xs = [1, 2, 3, 4];
+ let ys = [4, 3, 2, 1];
+ assert!(xs.iter().eq(ys.iter().rev()));
+ // [1,2,3,4] < [3,4] lexicographically (1 < 3 decides it).
+ assert!(xs.iter().lt(xs.iter().skip(2)));
+}
+
+// `Iterator::cmp_by` with an asymmetric comparator: left items are squared
+// before being compared to right items.
+#[test]
+fn test_cmp_by() {
+ use core::cmp::Ordering;
+
+ let f = |x: i32, y: i32| (x * x).cmp(&y);
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 16].iter().copied();
+
+ assert_eq!(xs().cmp_by(ys(), f), Ordering::Less);
+ assert_eq!(ys().cmp_by(xs(), f), Ordering::Greater);
+ assert_eq!(xs().cmp_by(xs().map(|x| x * x), f), Ordering::Equal);
+ assert_eq!(xs().rev().cmp_by(ys().rev(), f), Ordering::Greater);
+ assert_eq!(xs().cmp_by(ys().rev(), f), Ordering::Less);
+ // Prefix is equal, but the left iterator is longer -> Greater.
+ assert_eq!(xs().cmp_by(ys().take(2), f), Ordering::Greater);
+}
+
+// `Iterator::partial_cmp_by`: mirrors `cmp_by`, plus NaN cases where the
+// comparator returns `None` (but only if the NaN is actually reached).
+#[test]
+fn test_partial_cmp_by() {
+ use core::cmp::Ordering;
+
+ let f = |x: i32, y: i32| (x * x).partial_cmp(&y);
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 16].iter().copied();
+
+ assert_eq!(xs().partial_cmp_by(ys(), f), Some(Ordering::Less));
+ assert_eq!(ys().partial_cmp_by(xs(), f), Some(Ordering::Greater));
+ assert_eq!(xs().partial_cmp_by(xs().map(|x| x * x), f), Some(Ordering::Equal));
+ assert_eq!(xs().rev().partial_cmp_by(ys().rev(), f), Some(Ordering::Greater));
+ assert_eq!(xs().partial_cmp_by(xs().rev(), f), Some(Ordering::Less));
+ assert_eq!(xs().partial_cmp_by(ys().take(2), f), Some(Ordering::Greater));
+
+ let f = |x: f64, y: f64| (x * x).partial_cmp(&y);
+ let xs = || [1.0, 2.0, 3.0, 4.0].iter().copied();
+ let ys = || [1.0, 4.0, f64::NAN, 16.0].iter().copied();
+
+ // NaN is reached -> comparison is undecided.
+ assert_eq!(xs().partial_cmp_by(ys(), f), None);
+ // Decided (Greater) before the NaN is ever compared.
+ assert_eq!(ys().partial_cmp_by(xs(), f), Some(Ordering::Greater));
+}
+
+// `Iterator::eq_by` with an asymmetric predicate (left squared == right),
+// including the length-mismatch cases.
+#[test]
+fn test_eq_by() {
+ let f = |x: i32, y: i32| x * x == y;
+ let xs = || [1, 2, 3, 4].iter().copied();
+ let ys = || [1, 4, 9, 16].iter().copied();
+
+ assert!(xs().eq_by(ys(), f));
+ assert!(!ys().eq_by(xs(), f));
+ assert!(!xs().eq_by(xs(), f));
+ assert!(!ys().eq_by(ys(), f));
+
+ // Differing lengths are never equal, even if the common prefix matches.
+ assert!(!xs().take(3).eq_by(ys(), f));
+ assert!(!xs().eq_by(ys().take(3), f));
+ assert!(xs().take(3).eq_by(ys().take(3), f));
+}
+
+// Collecting a stepped counter yields the expected arithmetic sequence.
+#[test]
+fn test_counter_from_iter() {
+ let counter = (0..).step_by(5).take(10);
+ let collected: Vec<isize> = counter.collect();
+ assert_eq!(collected, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]);
+}
+
+// `chain` yields the first iterator's items, then the second's; checked both
+// for slice iterators and for a range-based second half.
+#[test]
+fn test_iterator_chain() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ let it = xs.iter().chain(&ys);
+ let mut i = 0;
+ for &x in it {
+ assert_eq!(x, expected[i]);
+ i += 1;
+ }
+ assert_eq!(i, expected.len());
+
+ // Same expected output with a lazily generated second iterator.
+ let ys = (30..).step_by(10).take(4);
+ let it = xs.iter().cloned().chain(ys);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(x, expected[i]);
+ i += 1;
+ }
+ assert_eq!(i, expected.len());
+}
+
+// `Chain::advance_by`: landing inside either half, over-advancing (the `Err`
+// payload is the number of remaining items), and exact exhaustion. `unfuse`
+// additionally asserts the halves are never polled after returning `None`.
+#[test]
+fn test_iterator_chain_advance_by() {
+ fn test_chain(xs: &[i32], ys: &[i32]) {
+ let len = xs.len() + ys.len();
+
+ // Land on each position inside the first half.
+ for i in 0..xs.len() {
+ let mut iter = unfuse(xs).chain(unfuse(ys));
+ iter.advance_by(i).unwrap();
+ assert_eq!(iter.next(), Some(&xs[i]));
+ assert_eq!(iter.advance_by(100), Err(len - i - 1));
+ }
+
+ // Land on each position inside the second half.
+ for i in 0..ys.len() {
+ let mut iter = unfuse(xs).chain(unfuse(ys));
+ iter.advance_by(xs.len() + i).unwrap();
+ assert_eq!(iter.next(), Some(&ys[i]));
+ assert_eq!(iter.advance_by(100), Err(ys.len() - i - 1));
+ }
+
+ // Advancing by exactly `len` succeeds and exhausts the chain.
+ let mut iter = xs.iter().chain(ys);
+ iter.advance_by(len).unwrap();
+ assert_eq!(iter.next(), None);
+
+ // One past the end reports the full remaining length.
+ let mut iter = xs.iter().chain(ys);
+ assert_eq!(iter.advance_by(len + 1), Err(len));
+ }
+
+ test_chain(&[], &[]);
+ test_chain(&[], &[0, 1, 2, 3, 4, 5]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[30, 40, 50, 60]);
+}
+
+// Mirror of `test_iterator_chain_advance_by` for `advance_back_by`: the back
+// of the chain is the end of `ys`, then the end of `xs`.
+#[test]
+fn test_iterator_chain_advance_back_by() {
+ fn test_chain(xs: &[i32], ys: &[i32]) {
+ let len = xs.len() + ys.len();
+
+ // Land on each position inside the second (back) half.
+ for i in 0..ys.len() {
+ let mut iter = unfuse(xs).chain(unfuse(ys));
+ iter.advance_back_by(i).unwrap();
+ assert_eq!(iter.next_back(), Some(&ys[ys.len() - i - 1]));
+ assert_eq!(iter.advance_back_by(100), Err(len - i - 1));
+ }
+
+ // Land on each position inside the first half.
+ for i in 0..xs.len() {
+ let mut iter = unfuse(xs).chain(unfuse(ys));
+ iter.advance_back_by(ys.len() + i).unwrap();
+ assert_eq!(iter.next_back(), Some(&xs[xs.len() - i - 1]));
+ assert_eq!(iter.advance_back_by(100), Err(xs.len() - i - 1));
+ }
+
+ let mut iter = xs.iter().chain(ys);
+ iter.advance_back_by(len).unwrap();
+ assert_eq!(iter.next_back(), None);
+
+ let mut iter = xs.iter().chain(ys);
+ assert_eq!(iter.advance_back_by(len + 1), Err(len));
+ }
+
+ test_chain(&[], &[]);
+ test_chain(&[], &[0, 1, 2, 3, 4, 5]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[]);
+ test_chain(&[0, 1, 2, 3, 4, 5], &[30, 40, 50, 60]);
+}
+
+// `Chain::nth` for every index, including crossing into the second half and
+// chaining with an empty side.
+#[test]
+fn test_iterator_chain_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ for (i, x) in expected.iter().enumerate() {
+ assert_eq!(Some(x), xs.iter().chain(&ys).nth(i));
+ }
+ assert_eq!(zs.iter().chain(&xs).nth(0), Some(&0));
+
+ // `nth` consuming exactly the first half leaves an empty chain.
+ let mut it = xs.iter().chain(&zs);
+ assert_eq!(it.nth(5), Some(&5));
+ assert_eq!(it.next(), None);
+}
+
+// `Chain::nth_back` counterpart: indexes from the back of the chain.
+#[test]
+fn test_iterator_chain_nth_back() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60];
+ for (i, x) in expected.iter().rev().enumerate() {
+ assert_eq!(Some(x), xs.iter().chain(&ys).nth_back(i));
+ }
+ assert_eq!(zs.iter().chain(&xs).nth_back(0), Some(&5));
+
+ // Taking all items from the back leaves nothing for the front.
+ let mut it = xs.iter().chain(&zs);
+ assert_eq!(it.nth_back(5), Some(&0));
+ assert_eq!(it.next(), None);
+}
+
+// `Chain::last`: the last item of the second half, falling back to the first
+// half when the second is empty; `None` only when both are empty.
+#[test]
+fn test_iterator_chain_last() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ assert_eq!(xs.iter().chain(&ys).last(), Some(&60));
+ assert_eq!(zs.iter().chain(&ys).last(), Some(&60));
+ assert_eq!(ys.iter().chain(&zs).last(), Some(&60));
+ assert_eq!(zs.iter().chain(&zs).last(), None);
+}
+
+// `Chain::count` is the sum of the two halves' counts.
+#[test]
+fn test_iterator_chain_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let zs = [];
+ assert_eq!(xs.iter().chain(&ys).count(), 10);
+ assert_eq!(zs.iter().chain(&ys).count(), 4);
+}
+
+// `find` on a chain keeps the iterator usable afterwards, including after
+// the search crosses from the first half into the second.
+#[test]
+fn test_iterator_chain_find() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [30, 40, 50, 60];
+ let mut iter = xs.iter().chain(&ys);
+ assert_eq!(iter.find(|&&i| i == 4), Some(&4));
+ assert_eq!(iter.next(), Some(&5));
+ assert_eq!(iter.find(|&&i| i == 40), Some(&40));
+ assert_eq!(iter.next(), Some(&50));
+ assert_eq!(iter.find(|&&i| i == 100), None);
+ assert_eq!(iter.next(), None);
+}
+
+// Deliberately non-fused test iterator; see the `Iterator` impl below.
+struct Toggle {
+ // When true the next call yields `None`, then flips.
+ is_empty: bool,
+}
+
+impl Iterator for Toggle {
+ type Item = ();
+
+ // alternates between `None` and `Some(())`
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.is_empty {
+ self.is_empty = false;
+ None
+ } else {
+ self.is_empty = true;
+ Some(())
+ }
+ }
+
+ // Size hint tracks the *next* call's result: (0,0) before a `None`,
+ // (1,1) before a `Some`.
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ if self.is_empty { (0, Some(0)) } else { (1, Some(1)) }
+ }
+}
+
+// The back end behaves exactly like the front: same toggling state.
+impl DoubleEndedIterator for Toggle {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.next()
+ }
+}
+
+// Once one half of a chain is exhausted, its (now meaningless) size hint must
+// no longer contribute to the chain's hint — in either direction.
+#[test]
+fn test_iterator_chain_size_hint() {
+ // this chains an iterator of length 0 with an iterator of length 1,
+ // so after calling `.next()` once, the iterator is empty and the
+ // state is `ChainState::Back`. `.size_hint()` should now disregard
+ // the size hint of the left iterator
+ let mut iter = Toggle { is_empty: true }.chain(once(()));
+ assert_eq!(iter.next(), Some(()));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+
+ // Symmetric case from the back: the right iterator's hint is ignored.
+ let mut iter = once(()).chain(Toggle { is_empty: true });
+ assert_eq!(iter.next_back(), Some(()));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+}
+
+// Chain must not fuse its *second* iterator when iterating forward, nor its
+// *first* when iterating backward — a non-fused half may resume yielding.
+// NOTE(review): relies on a `NonFused` helper defined elsewhere in this test
+// crate and on the (since removed) `Option::unwrap_none` method.
+#[test]
+fn test_iterator_chain_unfused() {
+ // Chain shouldn't be fused in its second iterator, depending on direction
+ let mut iter = NonFused::new(empty()).chain(Toggle { is_empty: true });
+ iter.next().unwrap_none();
+ iter.next().unwrap();
+ iter.next().unwrap_none();
+
+ let mut iter = Toggle { is_empty: true }.chain(NonFused::new(empty()));
+ iter.next_back().unwrap_none();
+ iter.next_back().unwrap();
+ iter.next_back().unwrap_none();
+}
+
+// `Zip::nth`, including running off the end of the shorter side.
+#[test]
+fn test_zip_nth() {
+ let xs = [0, 1, 2, 4, 5];
+ let ys = [10, 11, 12];
+
+ let mut it = xs.iter().zip(&ys);
+ assert_eq!(it.nth(0), Some((&0, &10)));
+ assert_eq!(it.nth(1), Some((&2, &12)));
+ assert_eq!(it.nth(0), None);
+
+ // `ys` has only 3 items, so index 3 is out of range either way around.
+ let mut it = xs.iter().zip(&ys);
+ assert_eq!(it.nth(3), None);
+
+ let mut it = ys.iter().zip(&xs);
+ assert_eq!(it.nth(3), None);
+}
+
+// `Zip::nth` must run side effects of both inner iterators exactly once per
+// consumed element — no element is pulled from one side without the other.
+#[test]
+fn test_zip_nth_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let value = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }))
+ .skip(1)
+ .nth(3);
+ // skip(1) + nth(3) consumes 5 pairs total; the 5th pair is (50, 6000).
+ assert_eq!(value, Some((50, 6000)));
+ assert_eq!(a, vec![1, 2, 3, 4, 5]);
+ assert_eq!(b, vec![200, 300, 400, 500, 600]);
+}
+
+// `Zip::next_back` side-effect ordering: the longer side must first be
+// trimmed down to the common length before pairs are yielded from the back.
+#[test]
+fn test_zip_next_back_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ // The second iterator is one item longer, so `next_back` is called on it
+ // one more time.
+ assert_eq!(iter.next_back(), Some((60, 7000)));
+ assert_eq!(iter.next_back(), Some((50, 6000)));
+ assert_eq!(iter.next_back(), Some((40, 5000)));
+ assert_eq!(iter.next_back(), Some((30, 4000)));
+ assert_eq!(a, vec![6, 5, 4, 3]);
+ assert_eq!(b, vec![800, 700, 600, 500, 400]);
+}
+
+// Same side-effect expectations as the `next_back` test, but reached through
+// a single `nth_back(3)` call instead of four `next_back` calls.
+#[test]
+fn test_zip_nth_back_side_effects() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let value = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }))
+ .nth_back(3);
+ assert_eq!(value, Some((30, 4000)));
+ assert_eq!(a, vec![6, 5, 4, 3]);
+ assert_eq!(b, vec![800, 700, 600, 500, 400]);
+}
+
+// After forward iteration exhausts the shorter side, `next_back` returns
+// `None` but may still poll the longer side while searching for its end.
+#[test]
+fn test_zip_next_back_side_effects_exhausted() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ iter.next();
+ iter.next();
+ iter.next();
+ iter.next();
+ assert_eq!(iter.next_back(), None);
+ // The longer (first) side was additionally polled from the back.
+ assert_eq!(a, vec![1, 2, 3, 4, 6, 5]);
+ assert_eq!(b, vec![200, 300, 400]);
+}
+
+// Counts how many times a value has been cloned; the cell holds the count.
+#[derive(Debug)]
+struct CountClone(Cell<i32>);
+
+// Fresh counter starting at zero clones.
+fn count_clone() -> CountClone {
+ CountClone(Cell::new(0))
+}
+
+// Lets tests compare a counter directly against the expected clone count.
+impl PartialEq<i32> for CountClone {
+ fn eq(&self, rhs: &i32) -> bool {
+ self.0.get() == *rhs
+ }
+}
+
+// Cloning increments the *source* value's count; the clone carries a copy of
+// the count as it was before the increment.
+impl Clone for CountClone {
+ fn clone(&self) -> Self {
+ let ret = CountClone(self.0.clone());
+ let n = self.0.get();
+ self.0.set(n + 1);
+ ret
+ }
+}
+
+// How many clones `zip(cloned, cloned)` performs on each side: the longer
+// first side is probed one extra time; a longer second side is not.
+#[test]
+fn test_zip_cloned_sideffectful() {
+ let xs = [count_clone(), count_clone(), count_clone(), count_clone()];
+ let ys = [count_clone(), count_clone()];
+
+ for _ in xs.iter().cloned().zip(ys.iter().cloned()) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 0][..]);
+ assert_eq!(&ys, &[1, 1][..]);
+
+ let xs = [count_clone(), count_clone()];
+ let ys = [count_clone(), count_clone(), count_clone(), count_clone()];
+
+ for _ in xs.iter().cloned().zip(ys.iter().cloned()) {}
+
+ assert_eq!(&xs, &[1, 1][..]);
+ assert_eq!(&ys, &[1, 1, 0, 0][..]);
+}
+
+// Same asymmetry as the cloned test, but with `map` closures mutating the
+// arrays: the first side runs once more when it is the longer one.
+#[test]
+fn test_zip_map_sideffectful() {
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
+ for _ in xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]);
+ assert_eq!(&ys, &[1, 1, 1, 1]);
+
+ let mut xs = [0; 4];
+ let mut ys = [0; 6];
+
+ for _ in xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1)) {}
+
+ assert_eq!(&xs, &[1, 1, 1, 1]);
+ assert_eq!(&ys, &[1, 1, 1, 1, 0, 0]);
+}
+
+// Backward zip over side-effecting maps: `next_back` first trims the longer
+// side to the common length, running its tail effects.
+#[test]
+fn test_zip_map_rev_sideffectful() {
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
+ {
+ let mut it = xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1));
+ // One call from the back touches xs[3..6] (trim + yield) and ys[3].
+ it.next_back();
+ }
+ assert_eq!(&xs, &[0, 0, 0, 1, 1, 1]);
+ assert_eq!(&ys, &[0, 0, 0, 1]);
+
+ let mut xs = [0; 6];
+ let mut ys = [0; 4];
+
+ {
+ let mut it = xs.iter_mut().map(|x| *x += 1).zip(ys.iter_mut().map(|y| *y += 1));
+ // Forward-consume more pairs than exist, then poll the back once.
+ (&mut it).take(5).count();
+ it.next_back();
+ }
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 1]);
+ assert_eq!(&ys, &[1, 1, 1, 1]);
+}
+
+// Side effects must also fire when the effectful iterator is nested inside
+// another adaptor (`enumerate`) before being zipped.
+#[test]
+fn test_zip_nested_sideffectful() {
+ let mut xs = [0; 6];
+ let ys = [0; 4];
+
+ {
+ // test that it has the side effect nested inside enumerate
+ let it = xs.iter_mut().map(|x| *x = 1).enumerate().zip(&ys);
+ it.count();
+ }
+ assert_eq!(&xs, &[1, 1, 1, 1, 1, 0]);
+}
+
+// `nth_back(0)` on a forward-exhausted zip: returns `None` with the same
+// side-effect trace as the equivalent `next_back` test above.
+#[test]
+fn test_zip_nth_back_side_effects_exhausted() {
+ let mut a = Vec::new();
+ let mut b = Vec::new();
+ let mut iter = [1, 2, 3, 4, 5, 6]
+ .iter()
+ .cloned()
+ .map(|n| {
+ a.push(n);
+ n * 10
+ })
+ .zip([2, 3, 4].iter().cloned().map(|n| {
+ b.push(n * 100);
+ n * 1000
+ }));
+
+ iter.next();
+ iter.next();
+ iter.next();
+ iter.next();
+ assert_eq!(iter.nth_back(0), None);
+ assert_eq!(a, vec![1, 2, 3, 4, 6, 5]);
+ assert_eq!(b, vec![200, 300, 400]);
+}
+
+// Basic `step_by` behavior: identity step, larger steps, and backward
+// iteration (which still starts the stepping from the *front* element 0).
+#[test]
+fn test_iterator_step_by() {
+ // Identity
+ let mut it = (0..).step_by(1).take(3);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.next(), Some(2));
+ assert_eq!(it.next(), None);
+
+ let mut it = (0..).step_by(3).take(4);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(3));
+ assert_eq!(it.next(), Some(6));
+ assert_eq!(it.next(), Some(9));
+ assert_eq!(it.next(), None);
+
+ let mut it = (0..3).step_by(1);
+ assert_eq!(it.next_back(), Some(2));
+ assert_eq!(it.next_back(), Some(1));
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.next_back(), None);
+
+ // Backward over 0..11 step 3: the forward sequence is 0,3,6,9.
+ let mut it = (0..11).step_by(3);
+ assert_eq!(it.next_back(), Some(9));
+ assert_eq!(it.next_back(), Some(6));
+ assert_eq!(it.next_back(), Some(3));
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.next_back(), None);
+}
+
+// `StepBy::nth`: repeated nth(0) walks the stepped sequence; nth(k) on fresh
+// iterators indexes directly into it.
+#[test]
+fn test_iterator_step_by_nth() {
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.nth(0), Some(0));
+ assert_eq!(it.nth(0), Some(5));
+ assert_eq!(it.nth(0), Some(10));
+ assert_eq!(it.nth(0), Some(15));
+ assert_eq!(it.nth(0), None);
+
+ let it = (0..18).step_by(5);
+ assert_eq!(it.clone().nth(0), Some(0));
+ assert_eq!(it.clone().nth(1), Some(5));
+ assert_eq!(it.clone().nth(2), Some(10));
+ assert_eq!(it.clone().nth(3), Some(15));
+ assert_eq!(it.clone().nth(4), None);
+ assert_eq!(it.clone().nth(42), None);
+}
+
+// `StepBy::nth` must not overflow its internal skip arithmetic. `Test`
+// records, in a type wider than `usize`, the total distance its inner `nth`
+// was asked to skip, so overflow in the adaptor would make the totals wrong.
+#[test]
+fn test_iterator_step_by_nth_overflow() {
+ #[cfg(target_pointer_width = "8")]
+ type Bigger = u16;
+ #[cfg(target_pointer_width = "16")]
+ type Bigger = u32;
+ #[cfg(target_pointer_width = "32")]
+ type Bigger = u64;
+ #[cfg(target_pointer_width = "64")]
+ type Bigger = u128;
+
+ #[derive(Clone)]
+ struct Test(Bigger);
+ impl Iterator for &mut Test {
+ type Item = i32;
+ fn next(&mut self) -> Option<Self::Item> {
+ Some(21)
+ }
+ // Accumulates n+1 (the number of elements a `nth(n)` call consumes).
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0 += n as Bigger + 1;
+ Some(42)
+ }
+ }
+
+ // (n/2 + 20)^2-ish totals would overflow usize; Bigger must hold them.
+ let mut it = Test(0);
+ let root = usize::MAX >> (usize::BITS / 2);
+ let n = root + 20;
+ (&mut it).step_by(n).nth(n);
+ assert_eq!(it.0, n as Bigger * n as Bigger);
+
+ // large step
+ let mut it = Test(0);
+ (&mut it).step_by(usize::MAX).nth(5);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 5);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(2).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 2);
+
+ // n + 1 overflows
+ let mut it = Test(0);
+ (&mut it).step_by(1).nth(usize::MAX);
+ assert_eq!(it.0, (usize::MAX as Bigger) * 1);
+}
+
+// `StepBy::try_fold` must short-circuit on the first `None` from
+// `i8::checked_add` and leave the iterator resumable at the right element.
+#[test]
+fn test_iterator_step_by_nth_try_fold() {
+ let mut it = (0..).step_by(10);
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it.next(), Some(60));
+ assert_eq!(it.try_fold(0, i8::checked_add), None);
+ assert_eq!(it.next(), Some(90));
+
+ let mut it = (100..).step_by(10);
+ assert_eq!(it.try_fold(50, i8::checked_add), None);
+ assert_eq!(it.next(), Some(110));
+
+ // Exhausted iterator: the fold returns the untouched accumulator.
+ let mut it = (100..=100).step_by(10);
+ assert_eq!(it.next(), Some(100));
+ assert_eq!(it.try_fold(0, i8::checked_add), Some(0));
+}
+
+// `StepBy::nth_back`, with and without a preceding `next()` (which changes
+// the adaptor's internal `first_take` state).
+#[test]
+fn test_iterator_step_by_nth_back() {
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.nth_back(0), Some(15));
+ assert_eq!(it.nth_back(0), Some(10));
+ assert_eq!(it.nth_back(0), Some(5));
+ assert_eq!(it.nth_back(0), Some(0));
+ assert_eq!(it.nth_back(0), None);
+
+ let mut it = (0..16).step_by(5);
+ assert_eq!(it.next(), Some(0)); // to set `first_take` to `false`
+ assert_eq!(it.nth_back(0), Some(15));
+ assert_eq!(it.nth_back(0), Some(10));
+ assert_eq!(it.nth_back(0), Some(5));
+ assert_eq!(it.nth_back(0), None);
+
+ // Fresh iterator per call: nth_back(k) indexes from the back.
+ let it = || (0..18).step_by(5);
+ assert_eq!(it().nth_back(0), Some(15));
+ assert_eq!(it().nth_back(1), Some(10));
+ assert_eq!(it().nth_back(2), Some(5));
+ assert_eq!(it().nth_back(3), Some(0));
+ assert_eq!(it().nth_back(4), None);
+ assert_eq!(it().nth_back(42), None);
+}
+
+// `StepBy::try_rfold` must short-circuit on overflow and leave the iterator
+// resumable from both ends at the correct elements.
+#[test]
+fn test_iterator_step_by_nth_try_rfold() {
+ let mut it = (0..100).step_by(10);
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(70));
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.try_rfold(0, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(30));
+
+ let mut it = (0..100).step_by(10);
+ assert_eq!(it.try_rfold(50, i8::checked_add), None);
+ assert_eq!(it.next_back(), Some(80));
+
+ // Exhausted iterator: the backward fold returns the untouched
+ // accumulator. (Was `try_fold`; this test exercises `try_rfold`.)
+ let mut it = (100..=100).step_by(10);
+ assert_eq!(it.next_back(), Some(100));
+ assert_eq!(it.try_rfold(0, i8::checked_add), Some(0));
+}
+
+// `step_by(0)` is a contract violation and must panic on first use.
+#[test]
+#[should_panic]
+fn test_iterator_step_by_zero() {
+ let mut it = (0..).step_by(0);
+ it.next();
+}
+
+// `StepBy::size_hint` in all regimes (exact multiple, remainder, infinite
+// upper bound), plus `ExactSizeIterator` propagation and a specialization
+// trick proving `StepBy` does NOT implement `TrustedLen`.
+#[test]
+fn test_iterator_step_by_size_hint() {
+ // Stub whose hint counts down as elements are consumed; `next` never
+ // returns `None`, only the hint matters here.
+ struct StubSizeHint(usize, Option<usize>);
+ impl Iterator for StubSizeHint {
+ type Item = ();
+ fn next(&mut self) -> Option<()> {
+ self.0 -= 1;
+ if let Some(ref mut upper) = self.1 {
+ *upper -= 1;
+ }
+ Some(())
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.0, self.1)
+ }
+ }
+
+ // The two checks in each case are needed because the logic
+ // is different before the first call to `next()`.
+
+ let mut it = StubSizeHint(10, Some(10)).step_by(1);
+ assert_eq!(it.size_hint(), (10, Some(10)));
+ it.next();
+ assert_eq!(it.size_hint(), (9, Some(9)));
+
+ // exact multiple
+ let mut it = StubSizeHint(10, Some(10)).step_by(3);
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ it.next();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+
+ // larger base range, but not enough to get another element
+ let mut it = StubSizeHint(12, Some(12)).step_by(3);
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ it.next();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+
+ // smaller base range, so fewer resulting elements
+ let mut it = StubSizeHint(9, Some(9)).step_by(3);
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ it.next();
+ assert_eq!(it.size_hint(), (2, Some(2)));
+
+ // infinite upper bound
+ let mut it = StubSizeHint(usize::MAX, None).step_by(1);
+ assert_eq!(it.size_hint(), (usize::MAX, None));
+ it.next();
+ assert_eq!(it.size_hint(), (usize::MAX - 1, None));
+
+ // still infinite with larger step
+ let mut it = StubSizeHint(7, None).step_by(3);
+ assert_eq!(it.size_hint(), (3, None));
+ it.next();
+ assert_eq!(it.size_hint(), (2, None));
+
+ // propagates ExactSizeIterator
+ let a = [1, 2, 3, 4, 5];
+ let it = a.iter().step_by(2);
+ assert_eq!(it.len(), 3);
+
+ // Cannot be TrustedLen as a step greater than one makes an iterator
+ // with (usize::MAX, None) no longer meet the safety requirements.
+ // Uses specialization: the `TrustedLen` impl shadows the default.
+ trait TrustedLenCheck {
+ fn test(self) -> bool;
+ }
+ impl<T: Iterator> TrustedLenCheck for T {
+ default fn test(self) -> bool {
+ false
+ }
+ }
+ impl<T: TrustedLen> TrustedLenCheck for T {
+ fn test(self) -> bool {
+ true
+ }
+ }
+ assert!(TrustedLenCheck::test(a.iter()));
+ assert!(!TrustedLenCheck::test(a.iter().step_by(1)));
+}
+
+// `filter_map` keeps and transforms only the `Some` results.
+#[test]
+fn test_filter_map() {
+ let it = (0..).step_by(1).take(10).filter_map(|x| if x % 2 == 0 { Some(x * x) } else { None });
+ assert_eq!(it.collect::<Vec<usize>>(), [0 * 0, 2 * 2, 4 * 4, 6 * 6, 8 * 8]);
+}
+
+// `FilterMap` folds: forward `fold` sees the kept items in order, `rfold`
+// sees them in reverse; the accumulators index into the expected output.
+#[test]
+fn test_filter_map_fold() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let ys = [0 * 0, 2 * 2, 4 * 4, 6 * 6, 8 * 8];
+ let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None });
+ let i = it.fold(0, |i, x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x * x) } else { None });
+ let i = it.rfold(ys.len(), |i, x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+// `enumerate` pairs each item with its index; here items equal their index.
+#[test]
+fn test_iterator_enumerate() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let it = xs.iter().enumerate();
+ for (i, &x) in it {
+ assert_eq!(i, x);
+ }
+}
+
+// `Enumerate::nth` keeps indices consistent however the iterator is driven:
+// plain loop, repeated nth(0), nth(1) (skipping), and a direct nth(3).
+#[test]
+fn test_iterator_enumerate_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ for (i, &x) in xs.iter().enumerate() {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth(0) {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth(1) {
+ assert_eq!(i, x);
+ }
+
+ let (i, &x) = xs.iter().enumerate().nth(3).unwrap();
+ assert_eq!(i, x);
+ assert_eq!(i, 3);
+}
+
+// `Enumerate::nth_back` still reports *front-based* indices while consuming
+// from the back.
+#[test]
+fn test_iterator_enumerate_nth_back() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth_back(0) {
+ assert_eq!(i, x);
+ }
+
+ let mut it = xs.iter().enumerate();
+ while let Some((i, &x)) = it.nth_back(1) {
+ assert_eq!(i, x);
+ }
+
+ // Fourth element from the back of six is index 2.
+ let (i, &x) = xs.iter().enumerate().nth_back(3).unwrap();
+ assert_eq!(i, x);
+ assert_eq!(i, 2);
+}
+
+// `enumerate` does not change the number of items yielded.
+#[test]
+fn test_iterator_enumerate_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ assert_eq!(xs.iter().enumerate().count(), 6);
+}
+
+// `Enumerate` folds after partial consumption: indices must stay in sync
+// with the underlying items for both `fold` and `rfold`.
+#[test]
+fn test_iterator_enumerate_fold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().enumerate();
+ // steal a couple to get an interesting offset
+ assert_eq!(it.next(), Some((0, &0)));
+ assert_eq!(it.next(), Some((1, &1)));
+ let i = it.fold(2, |i, (j, &x)| {
+ assert_eq!(i, j);
+ assert_eq!(x, xs[j]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+
+ let mut it = xs.iter().enumerate();
+ assert_eq!(it.next(), Some((0, &0)));
+ let i = it.rfold(xs.len() - 1, |i, (j, &x)| {
+ assert_eq!(i, j);
+ assert_eq!(x, xs[j]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+// `filter` + `count`: five even numbers in 0..=8.
+#[test]
+fn test_iterator_filter_count() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ assert_eq!(xs.iter().filter(|&&x| x % 2 == 0).count(), 5);
+}
+
+// `Filter` folds: `fold` visits kept items front-to-back, `rfold`
+// back-to-front; accumulators index into the expected kept sequence.
+#[test]
+fn test_iterator_filter_fold() {
+ let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+ let ys = [0, 2, 4, 6, 8];
+ let it = xs.iter().filter(|&&x| x % 2 == 0);
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, ys[i]);
+ i + 1
+ });
+ assert_eq!(i, ys.len());
+
+ let it = xs.iter().filter(|&&x| x % 2 == 0);
+ let i = it.rfold(ys.len(), |i, &x| {
+ assert_eq!(x, ys[i - 1]);
+ i - 1
+ });
+ assert_eq!(i, 0);
+}
+
+// `Peekable` basics: `peek` never consumes, `len` accounts for the buffered
+// element, and `next_back` never returns the peeked (front) element early.
+#[test]
+fn test_iterator_peekable() {
+ let xs = vec![0, 1, 2, 3, 4, 5];
+
+ // Forward: peeking leaves the length unchanged.
+ let mut it = xs.iter().cloned().peekable();
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.next().unwrap(), 0);
+ assert_eq!(it.len(), 5);
+ assert_eq!(it.next().unwrap(), 1);
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next().unwrap(), 2);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next().unwrap(), 3);
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next().unwrap(), 4);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.peek().unwrap(), &5);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next().unwrap(), 5);
+ assert_eq!(it.len(), 0);
+ assert!(it.peek().is_none());
+ assert_eq!(it.len(), 0);
+ assert!(it.next().is_none());
+ assert_eq!(it.len(), 0);
+
+ // Backward: the peeked `0` stays buffered at the front and is the very
+ // last element `next_back` yields.
+ let mut it = xs.iter().cloned().peekable();
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 6);
+ assert_eq!(it.next_back().unwrap(), 5);
+ assert_eq!(it.len(), 5);
+ assert_eq!(it.next_back().unwrap(), 4);
+ assert_eq!(it.len(), 4);
+ assert_eq!(it.next_back().unwrap(), 3);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 3);
+ assert_eq!(it.next_back().unwrap(), 2);
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back().unwrap(), 1);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.peek().unwrap(), &0);
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back().unwrap(), 0);
+ assert_eq!(it.len(), 0);
+ assert!(it.peek().is_none());
+ assert_eq!(it.len(), 0);
+ assert!(it.next_back().is_none());
+ assert_eq!(it.len(), 0);
+}
+
+// `Peekable::count` must include a peeked-but-unconsumed element, for
+// non-empty, single-element, and empty underlying iterators.
+#[test]
+fn test_iterator_peekable_count() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [10];
+ let zs: [i32; 0] = [];
+
+ assert_eq!(xs.iter().peekable().count(), 6);
+
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.count(), 6);
+
+ assert_eq!(ys.iter().peekable().count(), 1);
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.peek(), Some(&&10));
+ assert_eq!(it.count(), 1);
+
+ assert_eq!(zs.iter().peekable().count(), 0);
+
+ let mut it = zs.iter().peekable();
+ assert_eq!(it.peek(), None);
+ // Completes the pattern of the cases above: count after a (failed) peek.
+ assert_eq!(it.count(), 0);
+}
+
+// `Peekable::nth` interleaved with `peek`: the buffered element counts as
+// position 0 of each `nth` call.
+#[test]
+fn test_iterator_peekable_nth() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.nth(0), Some(&0));
+ assert_eq!(it.peek(), Some(&&1));
+ assert_eq!(it.nth(1), Some(&2));
+ assert_eq!(it.peek(), Some(&&3));
+ assert_eq!(it.nth(2), Some(&5));
+ assert_eq!(it.next(), None);
+}
+
+// `Peekable::last`: a buffered element may itself be the last item, and a
+// fully consumed peekable has no last item.
+#[test]
+fn test_iterator_peekable_last() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let ys = [0];
+
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.last(), Some(&5));
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ assert_eq!(it.last(), Some(&0));
+
+ let mut it = ys.iter().peekable();
+ assert_eq!(it.next(), Some(&0));
+ assert_eq!(it.peek(), None);
+ assert_eq!(it.last(), None);
+}
+
+// `Peekable::fold` must start with the buffered (peeked) element.
+#[test]
+fn test_iterator_peekable_fold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ let i = it.fold(0, |i, &x| {
+ assert_eq!(x, xs[i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+}
+
+// `Peekable::rfold` must end with the buffered front element last.
+#[test]
+fn test_iterator_peekable_rfold() {
+ let xs = [0, 1, 2, 3, 4, 5];
+ let mut it = xs.iter().peekable();
+ assert_eq!(it.peek(), Some(&&0));
+ let i = it.rfold(0, |i, &x| {
+ assert_eq!(x, xs[xs.len() - 1 - i]);
+ i + 1
+ });
+ assert_eq!(i, xs.len());
+}
+
+// `Peekable::next_if_eq`: consumes only on a match, works before and after
+// an explicit `peek`, and supports borrowed comparands for owned items.
+#[test]
+fn test_iterator_peekable_next_if_eq() {
+ // first, try on references
+ let xs = vec!["Heart", "of", "Gold"];
+ let mut it = xs.into_iter().peekable();
+ // try before `peek()`
+ assert_eq!(it.next_if_eq(&"trillian"), None);
+ assert_eq!(it.next_if_eq(&"Heart"), Some("Heart"));
+ // try after peek()
+ assert_eq!(it.peek(), Some(&"of"));
+ assert_eq!(it.next_if_eq(&"of"), Some("of"));
+ assert_eq!(it.next_if_eq(&"zaphod"), None);
+ // make sure `next()` still behaves
+ assert_eq!(it.next(), Some("Gold"));
+
+ // make sure comparison works for owned values
+ let xs = vec![String::from("Ludicrous"), "speed".into()];
+ let mut it = xs.into_iter().peekable();
+ // make sure basic functionality works
+ assert_eq!(it.next_if_eq("Ludicrous"), Some("Ludicrous".into()));
+ assert_eq!(it.next_if_eq("speed"), Some("speed".into()));
+ assert_eq!(it.next_if_eq(""), None);
+}
+
+/// This is an iterator that follows the Iterator contract,
+/// but it is not fused. After having returned None once, it will start
+/// producing elements if .next() is called again.
+pub struct CycleIter<'a, T> {
+ // Current position; cycles through 0..=data.len(), where `data.len()`
+ // is the one position that yields `None`.
+ index: usize,
+ data: &'a [T],
+}
+
+// Constructs a `CycleIter` starting at the first element.
+pub fn cycle<T>(data: &[T]) -> CycleIter<'_, T> {
+ CycleIter { index: 0, data }
+}
+
+impl<'a, T> Iterator for CycleIter<'a, T> {
+ type Item = &'a T;
+ fn next(&mut self) -> Option<Self::Item> {
+ // `get` yields `None` exactly when `index == data.len()`; the modulus
+ // then wraps the index so the next call starts the data over again.
+ let elt = self.data.get(self.index);
+ self.index += 1;
+ self.index %= 1 + self.data.len();
+ elt
+ }
+}
+
+// Once `peek` has observed `None`, the peekable must stay exhausted even
+// though the underlying `CycleIter` would produce elements again.
+#[test]
+fn test_iterator_peekable_remember_peek_none_1() {
+ // Check that the loop using .peek() terminates
+ let data = [1, 2, 3];
+ let mut iter = cycle(&data).peekable();
+
+ let mut n = 0;
+ while let Some(_) = iter.next() {
+ let is_the_last = iter.peek().is_none();
+ assert_eq!(is_the_last, n == data.len() - 1);
+ n += 1;
+ // Safety valve: fail (via the final assert) instead of looping forever.
+ if n > data.len() {
+ break;
+ }
+ }
+ assert_eq!(n, data.len());
+}
+
+// After `peek` sees `None`, `last` must also report exhaustion.
+#[test]
+fn test_iterator_peekable_remember_peek_none_2() {
+ let data = [0];
+ let mut iter = cycle(&data).peekable();
+ iter.next();
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.last(), None);
+}
+
+// `nth` must honor the peek buffer: a peeked element is position 0, and a
+// peeked `None` makes every subsequent `nth` return `None`.
+#[test]
+fn test_iterator_peekable_remember_peek_none_3() {
+ let data = [0];
+ let mut iter = cycle(&data).peekable();
+ iter.peek();
+ assert_eq!(iter.nth(0), Some(&0));
+
+ let mut iter = cycle(&data).peekable();
+ iter.next();
+ assert_eq!(iter.peek(), None);
+ assert_eq!(iter.nth(0), None);
+}
+
+// `take_while` stops at the first failing element and yields nothing after.
+#[test]
+fn test_iterator_take_while() {
+ let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
+ let ys = [0, 1, 2, 3, 5, 13];
+ let it = xs.iter().take_while(|&x| *x < 15);
+ let mut i = 0;
+ for x in it {
+ assert_eq!(*x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, ys.len());
+}
+
#[test]
fn test_iterator_skip_while() {
    // skip_while drops the failing prefix, then yields everything after.
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
    let expected = [15, 16, 17, 19];
    let mut seen = 0;
    for value in input.iter().skip_while(|&x| *x < 15) {
        assert_eq!(*value, expected[seen]);
        seen += 1;
    }
    assert_eq!(seen, expected.len());
}
+
#[test]
fn test_iterator_skip_while_fold() {
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
    let expected = [15, 16, 17, 19];

    // Folding a fresh SkipWhile must process the skip inside fold itself.
    let count = input.iter().skip_while(|&x| *x < 15).fold(0, |idx, &val| {
        assert_eq!(val, expected[idx]);
        idx + 1
    });
    assert_eq!(count, expected.len());

    // Folding after the skip already happened via next().
    let mut rest = input.iter().skip_while(|&x| *x < 15);
    assert_eq!(rest.next(), Some(&expected[0])); // process skips before folding
    let count = rest.fold(1, |idx, &val| {
        assert_eq!(val, expected[idx]);
        idx + 1
    });
    assert_eq!(count, expected.len());
}
+
#[test]
fn test_iterator_skip() {
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
    let expected = [13, 15, 16, 17, 19, 20, 30];
    let mut iter = input.iter().skip(5);
    let mut seen = 0;
    while let Some(&val) = iter.next() {
        assert_eq!(val, expected[seen]);
        seen += 1;
        // Skip reports an exact remaining length after every step.
        assert_eq!(iter.len(), input.len() - 5 - seen);
    }
    assert_eq!(seen, expected.len());
    assert_eq!(iter.len(), 0);
}
+
#[test]
fn test_iterator_skip_doubleended() {
    let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
    // Alternate pulling from the front and (via by_ref().rev()) the back
    // of a Skip over a reversed slice iterator.
    let mut it = xs.iter().rev().skip(5);
    assert_eq!(it.next(), Some(&15));
    assert_eq!(it.by_ref().rev().next(), Some(&0));
    assert_eq!(it.next(), Some(&13));
    assert_eq!(it.by_ref().rev().next(), Some(&1));
    assert_eq!(it.next(), Some(&5));
    assert_eq!(it.by_ref().rev().next(), Some(&2));
    // The two ends have met: only `3` remains.
    assert_eq!(it.next(), Some(&3));
    assert_eq!(it.next(), None);
    let mut it = xs.iter().rev().skip(5).rev();
    assert_eq!(it.next(), Some(&0));
    assert_eq!(it.rev().next(), Some(&15));
    let mut it_base = xs.iter();
    {
        // Drain a reversed Skip that borrows `it_base`.
        let mut it = it_base.by_ref().skip(5).rev();
        assert_eq!(it.next(), Some(&30));
        assert_eq!(it.next(), Some(&20));
        assert_eq!(it.next(), Some(&19));
        assert_eq!(it.next(), Some(&17));
        assert_eq!(it.next(), Some(&16));
        assert_eq!(it.next(), Some(&15));
        assert_eq!(it.next(), Some(&13));
        assert_eq!(it.next(), None);
    }
    // make sure the skipped parts have not been consumed
    assert_eq!(it_base.next(), Some(&0));
    assert_eq!(it_base.next(), Some(&1));
    assert_eq!(it_base.next(), Some(&2));
    assert_eq!(it_base.next(), Some(&3));
    assert_eq!(it_base.next(), Some(&5));
    assert_eq!(it_base.next(), None);
    let it = xs.iter().skip(5).rev();
    assert_eq!(it.last(), Some(&13));
}
+
#[test]
fn test_iterator_skip_nth() {
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];

    // skip(0) behaves like the plain iterator under nth.
    let mut iter = input.iter().skip(0);
    assert_eq!(iter.nth(0), Some(&0));
    assert_eq!(iter.nth(1), Some(&2));

    // nth counts from just past the skipped prefix.
    let mut iter = input.iter().skip(5);
    assert_eq!(iter.nth(0), Some(&13));
    assert_eq!(iter.nth(1), Some(&16));

    // Skipping the whole input leaves nothing for nth.
    let mut iter = input.iter().skip(12);
    assert_eq!(iter.nth(0), None);
}
+
#[test]
fn test_iterator_skip_count() {
    // count() after skip(n) is len - n, saturating at zero when n >= len.
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
    for &(n, remaining) in &[(0, 12), (1, 11), (11, 1), (12, 0), (13, 0)] {
        assert_eq!(input.iter().skip(n).count(), remaining);
    }
}
+
#[test]
fn test_iterator_skip_last() {
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];

    // last() is unaffected by skip while anything remains; None once
    // the skip swallows the whole input.
    for &(n, expected) in
        &[(0, Some(&30)), (1, Some(&30)), (11, Some(&30)), (12, None), (13, None)]
    {
        assert_eq!(input.iter().skip(n).last(), expected);
    }

    // last() still works after part of the adapter was consumed.
    let mut iter = input.iter().skip(5);
    assert_eq!(iter.next(), Some(&13));
    assert_eq!(iter.last(), Some(&30));
}
+
#[test]
fn test_iterator_skip_fold() {
    let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30];
    let ys = [13, 15, 16, 17, 19, 20, 30];

    // fold over a fresh Skip: the skipping happens inside fold itself.
    let it = xs.iter().skip(5);
    let i = it.fold(0, |i, &x| {
        assert_eq!(x, ys[i]);
        i + 1
    });
    assert_eq!(i, ys.len());

    let mut it = xs.iter().skip(5);
    assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
    let i = it.fold(1, |i, &x| {
        assert_eq!(x, ys[i]);
        i + 1
    });
    assert_eq!(i, ys.len());

    // rfold must skip the front elements, not the back ones.
    let it = xs.iter().skip(5);
    let i = it.rfold(ys.len(), |i, &x| {
        let i = i - 1;
        assert_eq!(x, ys[i]);
        i
    });
    assert_eq!(i, 0);

    let mut it = xs.iter().skip(5);
    assert_eq!(it.next(), Some(&ys[0])); // process skips before folding
    let i = it.rfold(ys.len(), |i, &x| {
        let i = i - 1;
        assert_eq!(x, ys[i]);
        i
    });
    assert_eq!(i, 1);
}
+
#[test]
fn test_iterator_take() {
    let input = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19];
    let expected = [0, 1, 2, 3, 5];

    // Forwards: take yields exactly the prefix, with an exact len().
    let mut iter = input.iter().take(expected.len());
    let mut seen = 0;
    assert_eq!(iter.len(), expected.len());
    while let Some(&val) = iter.next() {
        assert_eq!(val, expected[seen]);
        seen += 1;
        assert_eq!(iter.len(), expected.len() - seen);
    }
    assert_eq!(seen, expected.len());
    assert_eq!(iter.len(), 0);

    // Backwards: the same prefix comes out in reverse via next_back.
    let mut iter = input.iter().take(expected.len());
    let mut seen = 0;
    assert_eq!(iter.len(), expected.len());
    while let Some(&val) = iter.next_back() {
        seen += 1;
        assert_eq!(val, expected[expected.len() - seen]);
        assert_eq!(iter.len(), expected.len() - seen);
    }
    assert_eq!(seen, expected.len());
    assert_eq!(iter.len(), 0);
}
+
#[test]
fn test_iterator_take_nth() {
    let input = [0, 1, 2, 4, 5];
    let mut base = input.iter();
    {
        // nth(0) drains the Take one element at a time.
        let mut limited = base.by_ref().take(3);
        let mut expected = 0;
        while let Some(&val) = limited.nth(0) {
            assert_eq!(val, expected);
            expected += 1;
        }
    }
    // Only the three taken elements were consumed from the base iterator.
    assert_eq!(base.nth(1), Some(&5));
    assert_eq!(base.nth(0), None);

    // A take longer than the input ends when the input does.
    let input = [0, 1, 2, 3, 4];
    let mut limited = input.iter().take(7);
    let mut expected = 1;
    while let Some(&val) = limited.nth(1) {
        assert_eq!(val, expected);
        expected += 2;
    }
}
+
#[test]
fn test_iterator_take_nth_back() {
    let input = [0, 1, 2, 4, 5];
    let mut base = input.iter();
    {
        // nth_back(0) drains the Take from its back end.
        let mut limited = base.by_ref().take(3);
        let mut pulled = 0;
        while let Some(&val) = limited.nth_back(0) {
            pulled += 1;
            assert_eq!(val, 3 - pulled);
        }
    }
    // The Take consumed its whole three-element window from the base.
    assert_eq!(base.nth_back(0), None);

    // A take longer than the input mirrors the input's back end.
    let input = [0, 1, 2, 3, 4];
    let mut limited = input.iter().take(7);
    assert_eq!(limited.nth_back(1), Some(&3));
    assert_eq!(limited.nth_back(1), Some(&1));
    assert_eq!(limited.nth_back(1), None);
}
+
#[test]
fn test_iterator_take_short() {
    let input = [0, 1, 2, 3];

    // take(5) over 4 elements: len() reports the shorter, real length.
    let mut iter = input.iter().take(5);
    let mut seen = 0;
    assert_eq!(iter.len(), input.len());
    while let Some(&val) = iter.next() {
        assert_eq!(val, input[seen]);
        seen += 1;
        assert_eq!(iter.len(), input.len() - seen);
    }
    assert_eq!(seen, input.len());
    assert_eq!(iter.len(), 0);

    // Same from the back.
    let mut iter = input.iter().take(5);
    let mut seen = 0;
    assert_eq!(iter.len(), input.len());
    while let Some(&val) = iter.next_back() {
        seen += 1;
        assert_eq!(val, input[input.len() - seen]);
        assert_eq!(iter.len(), input.len() - seen);
    }
    assert_eq!(seen, input.len());
    assert_eq!(iter.len(), 0);
}
+
#[test]
fn test_iterator_scan() {
    // Uses a free function as the scan closure to exercise type inference.
    fn add(acc: &mut isize, item: &usize) -> Option<f64> {
        *acc += *item as isize;
        Some(*acc as f64)
    }
    let input = [0, 1, 2, 3, 4];
    let running_sums = [0f64, 1.0, 3.0, 6.0, 10.0];

    let mut seen = 0;
    for val in input.iter().scan(0, add) {
        assert_eq!(val, running_sums[seen]);
        seen += 1;
    }
    assert_eq!(seen, running_sums.len());
}
+
#[test]
fn test_iterator_flat_map() {
    // Each start value expands into three consecutive integers.
    let starts = [0, 3, 6];
    let expected = [0, 1, 2, 3, 4, 5, 6, 7, 8];
    let mut seen = 0;
    for val in starts.iter().flat_map(|&s| (s..).step_by(1).take(3)) {
        assert_eq!(val, expected[seen]);
        seen += 1;
    }
    assert_eq!(seen, expected.len());
}
+
/// Tests `FlatMap::fold` with items already picked off the front and back,
/// to make sure all parts of the `FlatMap` are folded correctly.
#[test]
fn test_iterator_flat_map_fold() {
    let starts = [0, 3, 6];
    let middle = [1, 2, 3, 4, 5, 6, 7];

    // Forward fold after trimming one item off each end.
    let mut iter = starts.iter().flat_map(|&s| s..s + 3);
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next_back(), Some(8));
    let count = iter.fold(0, |idx, val| {
        assert_eq!(val, middle[idx]);
        idx + 1
    });
    assert_eq!(count, middle.len());

    // Reverse fold over the same trimmed adapter.
    let mut iter = starts.iter().flat_map(|&s| s..s + 3);
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next_back(), Some(8));
    let count = iter.rfold(middle.len(), |idx, val| {
        assert_eq!(val, middle[idx - 1]);
        idx - 1
    });
    assert_eq!(count, 0);
}
+
#[test]
fn test_iterator_flatten() {
    // map + flatten should behave exactly like flat_map.
    let starts = [0, 3, 6];
    let expected = [0, 1, 2, 3, 4, 5, 6, 7, 8];
    let mut seen = 0;
    for val in starts.iter().map(|&s| (s..).step_by(1).take(3)).flatten() {
        assert_eq!(val, expected[seen]);
        seen += 1;
    }
    assert_eq!(seen, expected.len());
}
+
/// Tests `Flatten::fold` with items already picked off the front and back,
/// to make sure all parts of the `Flatten` are folded correctly.
#[test]
fn test_iterator_flatten_fold() {
    let starts = [0, 3, 6];
    let middle = [1, 2, 3, 4, 5, 6, 7];

    // Forward fold after trimming one item off each end.
    let mut iter = starts.iter().map(|&s| s..s + 3).flatten();
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next_back(), Some(8));
    let count = iter.fold(0, |idx, val| {
        assert_eq!(val, middle[idx]);
        idx + 1
    });
    assert_eq!(count, middle.len());

    // Reverse fold over the same trimmed adapter.
    let mut iter = starts.iter().map(|&s| s..s + 3).flatten();
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next_back(), Some(8));
    let count = iter.rfold(middle.len(), |idx, val| {
        assert_eq!(val, middle[idx - 1]);
        idx - 1
    });
    assert_eq!(count, 0);
}
+
#[test]
fn test_inspect() {
    let input = [1, 2, 3, 4];
    let mut calls = 0;

    // inspect must observe every element exactly once without altering them.
    let collected = input.iter().cloned().inspect(|_| calls += 1).collect::<Vec<usize>>();

    assert_eq!(calls, input.len());
    assert_eq!(&input[..], &collected[..]);
}
+
#[test]
fn test_inspect_fold() {
    let input = [1, 2, 3, 4];

    // The forward fold must run the inspect closure once per element.
    let mut calls = 0;
    {
        let iter = input.iter().inspect(|_| calls += 1);
        let count = iter.fold(0, |idx, &val| {
            assert_eq!(val, input[idx]);
            idx + 1
        });
        assert_eq!(count, input.len());
    }
    assert_eq!(calls, input.len());

    // Likewise for the reverse fold.
    let mut calls = 0;
    {
        let iter = input.iter().inspect(|_| calls += 1);
        let idx = iter.rfold(input.len(), |idx, &val| {
            assert_eq!(val, input[idx - 1]);
            idx - 1
        });
        assert_eq!(idx, 0);
    }
    assert_eq!(calls, input.len());
}
+
#[test]
fn test_cycle() {
    // A non-empty cycle is endless: the lower bound saturates, no upper bound.
    let cycle_len = 3;
    let iter = (0..).step_by(1).take(cycle_len).cycle();
    assert_eq!(iter.size_hint(), (usize::MAX, None));
    for (idx, val) in iter.take(100).enumerate() {
        assert_eq!(idx % cycle_len, val);
    }

    // Cycling an empty iterator stays empty.
    let mut iter = (0..).step_by(1).take(0).cycle();
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next(), None);

    assert_eq!(empty::<i32>().cycle().fold(0, |acc, x| acc + x), 0);

    assert_eq!(once(1).cycle().skip(1).take(4).fold(0, |acc, x| acc + x), 4);

    // Sums that cross repetition boundaries.
    assert_eq!((0..10).cycle().take(5).sum::<i32>(), 10);
    assert_eq!((0..10).cycle().take(15).sum::<i32>(), 55);
    assert_eq!((0..10).cycle().take(25).sum::<i32>(), 100);

    // nth into a later repetition, then keep taking.
    let mut iter = (0..10).cycle();
    iter.nth(14);
    assert_eq!(iter.take(8).sum::<i32>(), 38);

    let mut iter = (0..10).cycle();
    iter.nth(9);
    assert_eq!(iter.take(3).sum::<i32>(), 3);
}
+
#[test]
fn test_iterator_nth() {
    // nth(i) returns the i-th element (0-based) and None past the end.
    let values: &[_] = &[0, 1, 2, 3, 4];
    for idx in 0..values.len() {
        assert_eq!(values.iter().nth(idx).unwrap(), &values[idx]);
    }
    assert_eq!(values.iter().nth(values.len()), None);
}
+
#[test]
fn test_iterator_nth_back() {
    // nth_back(i) counts from the back end and returns None past the front.
    let values: &[_] = &[0, 1, 2, 3, 4];
    for idx in 0..values.len() {
        assert_eq!(values.iter().nth_back(idx).unwrap(), &values[values.len() - 1 - idx]);
    }
    assert_eq!(values.iter().nth_back(values.len()), None);
}
+
#[test]
fn test_iterator_rev_nth_back() {
    // nth_back on a reversed iterator indexes from the original front.
    let values: &[_] = &[0, 1, 2, 3, 4];
    for idx in 0..values.len() {
        assert_eq!(values.iter().rev().nth_back(idx).unwrap(), &values[idx]);
    }
    assert_eq!(values.iter().rev().nth_back(values.len()), None);
}
+
#[test]
fn test_iterator_rev_nth() {
    // nth on a reversed iterator indexes from the original back.
    let values: &[_] = &[0, 1, 2, 3, 4];
    for idx in 0..values.len() {
        assert_eq!(values.iter().rev().nth(idx).unwrap(), &values[values.len() - 1 - idx]);
    }
    assert_eq!(values.iter().rev().nth(values.len()), None);
}
+
#[test]
fn test_iterator_advance_by() {
    let v: &[_] = &[0, 1, 2, 3, 4];

    for i in 0..v.len() {
        let mut iter = v.iter();
        // Advancing by `i` leaves v[i] as the next element.
        assert_eq!(iter.advance_by(i), Ok(()));
        assert_eq!(iter.next().unwrap(), &v[i]);
        // Overshooting reports Err(k), where k is the number of steps
        // actually taken before the iterator ran out (v.len() - 1 - i here,
        // since one more element was consumed by next()).
        assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
    }

    // Advancing by exactly the length succeeds; beyond it reports len steps.
    assert_eq!(v.iter().advance_by(v.len()), Ok(()));
    assert_eq!(v.iter().advance_by(100), Err(v.len()));
}
+
#[test]
fn test_iterator_advance_back_by() {
    let v: &[_] = &[0, 1, 2, 3, 4];

    for i in 0..v.len() {
        let mut iter = v.iter();
        // Advancing the back by `i` leaves v[len - 1 - i] as next-from-back.
        assert_eq!(iter.advance_back_by(i), Ok(()));
        assert_eq!(iter.next_back().unwrap(), &v[v.len() - 1 - i]);
        // Overshooting reports Err(k) with k = steps actually taken.
        assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
    }

    assert_eq!(v.iter().advance_back_by(v.len()), Ok(()));
    assert_eq!(v.iter().advance_back_by(100), Err(v.len()));
}
+
#[test]
fn test_iterator_rev_advance_by() {
    let v: &[_] = &[0, 1, 2, 3, 4];

    // advance_by on a reversed iterator steps in from the original back end.
    for i in 0..v.len() {
        let mut iter = v.iter().rev();
        assert_eq!(iter.advance_by(i), Ok(()));
        assert_eq!(iter.next().unwrap(), &v[v.len() - 1 - i]);
        // Err(k): k = steps actually taken before exhaustion.
        assert_eq!(iter.advance_by(100), Err(v.len() - 1 - i));
    }

    assert_eq!(v.iter().rev().advance_by(v.len()), Ok(()));
    assert_eq!(v.iter().rev().advance_by(100), Err(v.len()));
}
+
#[test]
fn test_iterator_rev_advance_back_by() {
    let v: &[_] = &[0, 1, 2, 3, 4];

    // advance_back_by on a reversed iterator steps in from the original front.
    for i in 0..v.len() {
        let mut iter = v.iter().rev();
        assert_eq!(iter.advance_back_by(i), Ok(()));
        assert_eq!(iter.next_back().unwrap(), &v[i]);
        // Err(k): k = steps actually taken before exhaustion.
        assert_eq!(iter.advance_back_by(100), Err(v.len() - 1 - i));
    }

    assert_eq!(v.iter().rev().advance_back_by(v.len()), Ok(()));
    assert_eq!(v.iter().rev().advance_back_by(100), Err(v.len()));
}
+
#[test]
fn test_iterator_last() {
    let values: &[_] = &[0, 1, 2, 3, 4];
    assert_eq!(values.iter().last(), Some(&4));
    // A one-element slice yields that element as its last.
    assert_eq!(values[..1].iter().last(), Some(&0));
}
+
#[test]
fn test_iterator_len() {
    // count() consumes the iterator and reports slice lengths exactly.
    let values: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    for &n in &[4, 10, 0] {
        assert_eq!(values[..n].iter().count(), n);
    }
}
+
#[test]
fn test_iterator_sum() {
    let values: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(values[..4].iter().cloned().sum::<i32>(), 6);
    assert_eq!(values.iter().cloned().sum::<i32>(), 55);
    // The empty sum is the additive identity.
    assert_eq!(values[..0].iter().cloned().sum::<i32>(), 0);
}
+
#[test]
fn test_iterator_sum_result() {
    // Summing Results short-circuits on the first Err.
    let all_ok: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
    assert_eq!(all_ok.iter().cloned().sum::<Result<i32, _>>(), Ok(10));
    let has_err: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
    assert_eq!(has_err.iter().cloned().sum::<Result<i32, _>>(), Err(()));

    #[derive(PartialEq, Debug)]
    struct S(Result<i32, ()>);

    impl Sum<Result<i32, ()>> for S {
        fn sum<I: Iterator<Item = Result<i32, ()>>>(mut iter: I) -> Self {
            // takes the sum by repeatedly calling `next` on `iter`,
            // thus testing that repeated calls to `ResultShunt::try_fold`
            // produce the expected results
            Self(iter.by_ref().sum())
        }
    }

    let all_ok: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
    assert_eq!(all_ok.iter().cloned().sum::<S>(), S(Ok(10)));
    let has_err: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
    assert_eq!(has_err.iter().cloned().sum::<S>(), S(Err(())));
}
+
#[test]
fn test_iterator_sum_option() {
    // Summing Options short-circuits on the first None.
    let all_some: &[Option<i32>] = &[Some(1), Some(2), Some(3), Some(4)];
    assert_eq!(all_some.iter().cloned().sum::<Option<i32>>(), Some(10));
    let has_none: &[Option<i32>] = &[Some(1), None, Some(3), Some(4)];
    assert_eq!(has_none.iter().cloned().sum::<Option<i32>>(), None);
}
+
#[test]
fn test_iterator_product() {
    let values: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    // A zero factor collapses the product.
    assert_eq!(values[..4].iter().cloned().product::<i32>(), 0);
    assert_eq!(values[1..5].iter().cloned().product::<i32>(), 24);
    // The empty product is the multiplicative identity.
    assert_eq!(values[..0].iter().cloned().product::<i32>(), 1);
}
+
#[test]
fn test_iterator_product_result() {
    // Multiplying Results short-circuits on the first Err.
    let all_ok: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)];
    assert_eq!(all_ok.iter().cloned().product::<Result<i32, _>>(), Ok(24));
    let has_err: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)];
    assert_eq!(has_err.iter().cloned().product::<Result<i32, _>>(), Err(()));
}
+
/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped
/// integer modulo 3. Used to test that `Iterator::max` and `Iterator::min`
/// return the correct element if some of them are equal.
#[derive(Debug)]
struct Mod3(i32);

impl PartialEq for Mod3 {
    // Equality is on the residue class, not the stored value:
    // Mod3(1) == Mod3(4).
    fn eq(&self, other: &Self) -> bool {
        self.0 % 3 == other.0 % 3
    }
}

impl Eq for Mod3 {}

impl PartialOrd for Mod3 {
    // Delegates to `cmp` so the two orderings can never disagree.
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Mod3 {
    // Total order on the residue modulo 3, consistent with `eq` above.
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        (self.0 % 3).cmp(&(other.0 % 3))
    }
}
+
#[test]
fn test_iterator_product_option() {
    // Multiplying Options short-circuits on the first None.
    let all_some: &[Option<i32>] = &[Some(1), Some(2), Some(3), Some(4)];
    assert_eq!(all_some.iter().cloned().product::<Option<i32>>(), Some(24));
    let has_none: &[Option<i32>] = &[Some(1), None, Some(3), Some(4)];
    assert_eq!(has_none.iter().cloned().product::<Option<i32>>(), None);
}
+
#[test]
fn test_iterator_max() {
    let values: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(values[..4].iter().cloned().max(), Some(3));
    assert_eq!(values.iter().cloned().max(), Some(10));
    assert_eq!(values[..0].iter().cloned().max(), None);
    // Under the Mod3 ordering several elements tie for the maximum;
    // max() must return the LAST of them (8, not 2 or 5).
    assert_eq!(values.iter().cloned().map(Mod3).max().map(|x| x.0), Some(8));
}
+
#[test]
fn test_iterator_min() {
    let values: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    assert_eq!(values[..4].iter().cloned().min(), Some(0));
    assert_eq!(values.iter().cloned().min(), Some(0));
    assert_eq!(values[..0].iter().cloned().min(), None);
    // Under the Mod3 ordering several elements tie for the minimum;
    // min() must return the FIRST of them (0, not 3, 6, or 9).
    assert_eq!(values.iter().cloned().map(Mod3).min().map(|x| x.0), Some(0));
}
+
#[test]
fn test_iterator_size_hint() {
    let c = (0..).step_by(1);
    let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    let v2 = &[10, 11, 12];
    let vi = v.iter();

    // Unbounded sources saturate the lower bound and have no upper bound.
    assert_eq!((0..).size_hint(), (usize::MAX, None));
    assert_eq!(c.size_hint(), (usize::MAX, None));
    assert_eq!(vi.clone().size_hint(), (10, Some(10)));

    // Adapters over the unbounded iterator: filtering/short-circuiting
    // adapters drop the lower bound to 0; zip is capped by the finite side.
    assert_eq!(c.clone().take(5).size_hint(), (5, Some(5)));
    assert_eq!(c.clone().skip(5).size_hint().1, None);
    assert_eq!(c.clone().take_while(|_| false).size_hint(), (0, None));
    assert_eq!(c.clone().map_while(|_| None::<()>).size_hint(), (0, None));
    assert_eq!(c.clone().skip_while(|_| false).size_hint(), (0, None));
    assert_eq!(c.clone().enumerate().size_hint(), (usize::MAX, None));
    assert_eq!(c.clone().chain(vi.clone().cloned()).size_hint(), (usize::MAX, None));
    assert_eq!(c.clone().zip(vi.clone()).size_hint(), (10, Some(10)));
    assert_eq!(c.clone().scan(0, |_, _| Some(0)).size_hint(), (0, None));
    assert_eq!(c.clone().filter(|_| false).size_hint(), (0, None));
    assert_eq!(c.clone().map(|_| 0).size_hint(), (usize::MAX, None));
    assert_eq!(c.filter_map(|_| Some(0)).size_hint(), (0, None));

    // The same adapters over an exact-size iterator keep a finite
    // upper bound even when the lower bound collapses to 0.
    assert_eq!(vi.clone().take(5).size_hint(), (5, Some(5)));
    assert_eq!(vi.clone().take(12).size_hint(), (10, Some(10)));
    assert_eq!(vi.clone().skip(3).size_hint(), (7, Some(7)));
    assert_eq!(vi.clone().skip(12).size_hint(), (0, Some(0)));
    assert_eq!(vi.clone().take_while(|_| false).size_hint(), (0, Some(10)));
    assert_eq!(vi.clone().map_while(|_| None::<()>).size_hint(), (0, Some(10)));
    assert_eq!(vi.clone().skip_while(|_| false).size_hint(), (0, Some(10)));
    assert_eq!(vi.clone().enumerate().size_hint(), (10, Some(10)));
    assert_eq!(vi.clone().chain(v2).size_hint(), (13, Some(13)));
    assert_eq!(vi.clone().zip(v2).size_hint(), (3, Some(3)));
    assert_eq!(vi.clone().scan(0, |_, _| Some(0)).size_hint(), (0, Some(10)));
    assert_eq!(vi.clone().filter(|_| false).size_hint(), (0, Some(10)));
    assert_eq!(vi.clone().map(|&i| i + 1).size_hint(), (10, Some(10)));
    assert_eq!(vi.filter_map(|_| Some(0)).size_hint(), (0, Some(10)));
}
+
#[test]
fn test_collect() {
    // Collecting a cloned iterator reproduces the source vector.
    let source = vec![1, 2, 3, 4, 5];
    let copied: Vec<isize> = source.iter().cloned().collect();
    assert!(source == copied);
}
+
#[test]
fn test_all() {
    let values: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
    assert!(values.iter().all(|&x| x < 10));
    assert!(!values.iter().all(|&x| x % 2 == 0));
    assert!(!values.iter().all(|&x| x > 100));
    // all() is vacuously true on an empty iterator; the closure never runs.
    assert!(values[..0].iter().all(|_| panic!()));
}
+
#[test]
fn test_any() {
    let values: Box<[isize]> = Box::new([1, 2, 3, 4, 5]);
    assert!(values.iter().any(|&x| x < 10));
    assert!(values.iter().any(|&x| x % 2 == 0));
    assert!(!values.iter().any(|&x| x > 100));
    // any() is false on an empty iterator; the closure never runs.
    assert!(!values[..0].iter().any(|_| panic!()));
}
+
#[test]
fn test_find() {
    let haystack: &[isize] = &[1, 3, 9, 27, 103, 14, 11];
    // First even element, first multiple of 3, and an absent match.
    assert_eq!(*haystack.iter().find(|&&x| x & 1 == 0).unwrap(), 14);
    assert_eq!(*haystack.iter().find(|&&x| x % 3 == 0).unwrap(), 3);
    assert!(haystack.iter().find(|&&x| x % 12 == 0).is_none());
}
+
#[test]
fn test_find_map() {
    // Helper: half of even numbers, None for odd ones.
    fn half_if_even(x: &isize) -> Option<isize> {
        if x % 2 == 0 { Some(x / 2) } else { None }
    }

    let nothing: &[isize] = &[];
    assert_eq!(nothing.iter().find_map(half_if_even), None);
    let all_odd: &[isize] = &[3, 5];
    assert_eq!(all_odd.iter().find_map(half_if_even), None);
    let even_first: &[isize] = &[4, 5];
    assert_eq!(even_first.iter().find_map(half_if_even), Some(2));
    let even_last: &[isize] = &[3, 6];
    assert_eq!(even_last.iter().find_map(half_if_even), Some(3));

    // find_map advances past each match, so repeated calls continue the scan.
    let mixed: &[isize] = &[1, 2, 3, 4, 5, 6, 7];
    let mut iter = mixed.iter();
    assert_eq!(iter.find_map(half_if_even), Some(1));
    assert_eq!(iter.find_map(half_if_even), Some(2));
    assert_eq!(iter.find_map(half_if_even), Some(3));
    assert_eq!(iter.next(), Some(&7));
}
+
#[test]
fn test_try_find() {
    // try_find: Ok(None) when nothing matches, Ok(Some(_)) on a match,
    // Err(_) when the fallible predicate errors before a match is found.
    let xs: &[isize] = &[];
    assert_eq!(xs.iter().try_find(testfn), Ok(None));
    let xs: &[isize] = &[1, 2, 3, 4];
    assert_eq!(xs.iter().try_find(testfn), Ok(Some(&2)));
    let xs: &[isize] = &[1, 3, 4];
    assert_eq!(xs.iter().try_find(testfn), Err(()));

    // The iterator stops immediately after the match / the error,
    // leaving the remaining elements unconsumed.
    let xs: &[isize] = &[1, 2, 3, 4, 5, 6, 7];
    let mut iter = xs.iter();
    assert_eq!(iter.try_find(testfn), Ok(Some(&2)));
    assert_eq!(iter.try_find(testfn), Err(()));
    assert_eq!(iter.next(), Some(&5));

    // Predicate: matches 2, errors on 4, rejects everything else.
    fn testfn(x: &&isize) -> Result<bool, ()> {
        if **x == 2 {
            return Ok(true);
        }
        if **x == 4 {
            return Err(());
        }
        Ok(false)
    }
}
+
#[test]
fn test_try_find_api_usability() -> Result<(), Box<dyn std::error::Error>> {
    let a = ["1", "2"];

    // A fallible predicate: parse errors propagate out of try_find,
    // and the test's Result return type lets `?` forward them.
    let is_my_num = |s: &str, search: i32| -> Result<bool, std::num::ParseIntError> {
        Ok(s.parse::<i32>()? == search)
    };

    let val = a.iter().try_find(|&&s| is_my_num(s, 2))?;
    assert_eq!(val, Some(&"2"));

    Ok(())
}
+
#[test]
fn test_position() {
    let values = &[1, 3, 9, 27, 103, 14, 11];
    // Index of the first even element, first multiple of 3, and a miss.
    assert_eq!(values.iter().position(|x| *x & 1 == 0).unwrap(), 5);
    assert_eq!(values.iter().position(|x| *x % 3 == 0).unwrap(), 1);
    assert!(values.iter().position(|x| *x % 12 == 0).is_none());
}
+
#[test]
fn test_count() {
    // count() after filter tallies only matching elements.
    let values = &[1, 2, 2, 1, 5, 9, 0, 2];
    assert_eq!(values.iter().filter(|&&x| x == 2).count(), 3);
    assert_eq!(values.iter().filter(|&&x| x == 5).count(), 1);
    assert_eq!(values.iter().filter(|&&x| x == 95).count(), 0);
}
+
#[test]
fn test_max_by_key() {
    // Largest absolute value wins, regardless of sign.
    let values: &[isize] = &[-3, 0, 1, 5, -10];
    assert_eq!(*values.iter().max_by_key(|x| x.abs()).unwrap(), -10);
}
+
#[test]
fn test_max_by() {
    // Comparator orders by absolute value, so -10 is the maximum.
    let values: &[isize] = &[-3, 0, 1, 5, -10];
    assert_eq!(*values.iter().max_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), -10);
}
+
#[test]
fn test_min_by_key() {
    // Smallest absolute value wins.
    let values: &[isize] = &[-3, 0, 1, 5, -10];
    assert_eq!(*values.iter().min_by_key(|x| x.abs()).unwrap(), 0);
}
+
#[test]
fn test_min_by() {
    // Comparator orders by absolute value, so 0 is the minimum.
    let values: &[isize] = &[-3, 0, 1, 5, -10];
    assert_eq!(*values.iter().min_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), 0);
}
+
#[test]
fn test_by_ref() {
    let mut range = 0..10;
    // Consume only the first five values through a by_ref adapter...
    let partial_sum = range.by_ref().take(5).fold(0, |acc, item| acc + item);
    assert_eq!(partial_sum, 10);
    // ...leaving the original iterator positioned at the sixth.
    assert_eq!(range.next(), Some(5));
}
+
#[test]
fn test_rev() {
    let values = [2, 4, 6, 8, 10, 12, 14, 16];
    let mut iter = values.iter();
    // Drop the first two elements before reversing the remainder.
    iter.next();
    iter.next();
    assert!(iter.rev().cloned().collect::<Vec<isize>>() == vec![16, 14, 12, 10, 8, 6]);
}
+
#[test]
fn test_copied() {
    let values = [2, 4, 6, 8];

    // copied() yields owned values and keeps an exact len() in sync
    // while consuming from both ends.
    let mut iter = values.iter().copied();
    assert_eq!(iter.len(), 4);
    assert_eq!(iter.next(), Some(2));
    assert_eq!(iter.len(), 3);
    assert_eq!(iter.next(), Some(4));
    assert_eq!(iter.len(), 2);
    assert_eq!(iter.next_back(), Some(8));
    assert_eq!(iter.len(), 1);
    assert_eq!(iter.next_back(), Some(6));
    assert_eq!(iter.len(), 0);
    assert_eq!(iter.next_back(), None);
}
+
#[test]
fn test_cloned() {
    let values = [2, 4, 6, 8];

    // cloned() behaves like copied() for Copy items: owned values
    // and an exact len() maintained from both ends.
    let mut iter = values.iter().cloned();
    assert_eq!(iter.len(), 4);
    assert_eq!(iter.next(), Some(2));
    assert_eq!(iter.len(), 3);
    assert_eq!(iter.next(), Some(4));
    assert_eq!(iter.len(), 2);
    assert_eq!(iter.next_back(), Some(8));
    assert_eq!(iter.len(), 1);
    assert_eq!(iter.next_back(), Some(6));
    assert_eq!(iter.len(), 0);
    assert_eq!(iter.next_back(), None);
}
+
#[test]
fn test_cloned_side_effects() {
    let mut count = 0;
    {
        let iter = [1, 2, 3]
            .iter()
            .map(|x| {
                count += 1;
                x
            })
            .cloned()
            .zip(&[1]);
        for _ in iter {}
    }
    // The zip yields one pair, then polls the first (counting) iterator a
    // second time before seeing the second side is exhausted — so the map
    // closure runs exactly twice, never a third time for element 3.
    assert_eq!(count, 2);
}
+
#[test]
fn test_double_ended_map() {
    // Map is double-ended: negation applies from whichever end is polled.
    let values = [1, 2, 3, 4, 5, 6];
    let mut iter = values.iter().map(|&x| x * -1);
    assert_eq!(iter.next(), Some(-1));
    assert_eq!(iter.next(), Some(-2));
    assert_eq!(iter.next_back(), Some(-6));
    assert_eq!(iter.next_back(), Some(-5));
    assert_eq!(iter.next(), Some(-3));
    assert_eq!(iter.next_back(), Some(-4));
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_double_ended_enumerate() {
    // Indices keep counting from the original front even via next_back.
    let values = [1, 2, 3, 4, 5, 6];
    let mut iter = values.iter().cloned().enumerate();
    assert_eq!(iter.next(), Some((0, 1)));
    assert_eq!(iter.next(), Some((1, 2)));
    assert_eq!(iter.next_back(), Some((5, 6)));
    assert_eq!(iter.next_back(), Some((4, 5)));
    assert_eq!(iter.next_back(), Some((3, 4)));
    assert_eq!(iter.next_back(), Some((2, 3)));
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_double_ended_zip() {
    let left = [1, 2, 3, 4, 5, 6];
    let right = [1, 2, 3, 7];
    // Zip's length is the shorter side's, so next_back starts at the
    // fourth pair even though `left` has six elements.
    let mut iter = left.iter().cloned().zip(right.iter().cloned());
    assert_eq!(iter.next(), Some((1, 1)));
    assert_eq!(iter.next(), Some((2, 2)));
    assert_eq!(iter.next_back(), Some((4, 7)));
    assert_eq!(iter.next_back(), Some((3, 3)));
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_double_ended_filter() {
    // Filter supports consuming matches from the back end first.
    let values = [1, 2, 3, 4, 5, 6];
    let mut evens = values.iter().filter(|&x| *x & 1 == 0);
    assert_eq!(evens.next_back().unwrap(), &6);
    assert_eq!(evens.next_back().unwrap(), &4);
    assert_eq!(evens.next().unwrap(), &2);
    assert_eq!(evens.next_back(), None);
}
+
#[test]
fn test_double_ended_filter_map() {
    // FilterMap doubles the even elements, from either end.
    let values = [1, 2, 3, 4, 5, 6];
    let mut doubled_evens = values.iter().filter_map(|&x| if x & 1 == 0 { Some(x * 2) } else { None });
    assert_eq!(doubled_evens.next_back().unwrap(), 12);
    assert_eq!(doubled_evens.next_back().unwrap(), 8);
    assert_eq!(doubled_evens.next().unwrap(), 4);
    assert_eq!(doubled_evens.next_back(), None);
}
+
#[test]
fn test_double_ended_chain() {
    let xs = [1, 2, 3, 4, 5];
    let ys = [7, 9, 11];
    // Reversed chain: the front yields ys reversed, the back yields xs
    // in original order, and the two meet in the middle at 7.
    let mut it = xs.iter().chain(&ys).rev();
    assert_eq!(it.next().unwrap(), &11);
    assert_eq!(it.next().unwrap(), &9);
    assert_eq!(it.next_back().unwrap(), &1);
    assert_eq!(it.next_back().unwrap(), &2);
    assert_eq!(it.next_back().unwrap(), &3);
    assert_eq!(it.next_back().unwrap(), &4);
    assert_eq!(it.next_back().unwrap(), &5);
    assert_eq!(it.next_back().unwrap(), &7);
    assert_eq!(it.next_back(), None);

    // test that .chain() is well behaved with an unfused iterator
    // (CrazyIterator yields None first, then 99 forever after).
    struct CrazyIterator(bool);
    impl CrazyIterator {
        fn new() -> CrazyIterator {
            CrazyIterator(false)
        }
    }
    impl Iterator for CrazyIterator {
        type Item = i32;
        fn next(&mut self) -> Option<i32> {
            if self.0 {
                Some(99)
            } else {
                self.0 = true;
                None
            }
        }
    }

    impl DoubleEndedIterator for CrazyIterator {
        fn next_back(&mut self) -> Option<i32> {
            self.next()
        }
    }

    // Chain must treat the crazy side as finished after its first None
    // instead of polling it again and receiving 99s.
    assert_eq!(CrazyIterator::new().chain(0..10).rev().last(), Some(0));
    assert!((0..10).chain(CrazyIterator::new()).rev().any(|i| i == 0));
}
+
#[test]
fn test_rposition() {
    // Predicates over (index, char) pairs.
    fn matches_b(entry: &(isize, char)) -> bool {
        let (_idx, ch) = *entry;
        ch == 'b'
    }
    fn matches_d(entry: &(isize, char)) -> bool {
        let (_idx, ch) = *entry;
        ch == 'd'
    }
    let pairs = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')];

    // rposition reports the index of the LAST match.
    assert_eq!(pairs.iter().rposition(matches_b), Some(3));
    assert!(pairs.iter().rposition(matches_d).is_none());
}
+
#[test]
fn test_rev_rposition() {
    // After rev() the sequence is [1, 1, 0, 0]; the last `1` sits at index 1.
    let values = [0, 0, 1, 1];
    assert_eq!(values.iter().rev().rposition(|&x| x == 1), Some(1));
}
+
#[test]
#[should_panic]
fn test_rposition_panic() {
    // NOTE(review): the Box elements presumably exist to exercise clean
    // drops while unwinding out of rposition — confirm against the
    // original test's intent.
    let v: [(Box<_>, Box<_>); 4] = [(box 0, box 0), (box 0, box 0), (box 0, box 0), (box 0, box 0)];
    let mut i = 0;
    // The closure panics on its third invocation; rposition must
    // propagate that panic (hence #[should_panic]).
    v.iter().rposition(|_elt| {
        if i == 2 {
            panic!()
        }
        i += 1;
        false
    });
}
+
#[test]
fn test_double_ended_flat_map() {
    // The two subslices are v[0..4] = [5,6,7,8] and v[1..4] = [6,7,8], so
    // the flattened sequence is 5,6,7,8,6,7,8 — 6, 7, 8 each appear twice.
    let u = [0, 1];
    let v = [5, 6, 7, 8];
    let mut it = u.iter().flat_map(|x| &v[*x..v.len()]);
    assert_eq!(it.next_back().unwrap(), &8);
    assert_eq!(it.next().unwrap(), &5);
    assert_eq!(it.next_back().unwrap(), &7);
    assert_eq!(it.next_back().unwrap(), &6);
    assert_eq!(it.next_back().unwrap(), &8);
    assert_eq!(it.next().unwrap(), &6);
    assert_eq!(it.next_back().unwrap(), &7);
    // Both ends exhausted; further polls from either side stay None.
    assert_eq!(it.next_back(), None);
    assert_eq!(it.next(), None);
    assert_eq!(it.next_back(), None);
}
+
#[test]
fn test_double_ended_flatten() {
    // Same shape as test_double_ended_flat_map, but via map(..).flatten():
    // flattened sequence is 5,6,7,8,6,7,8.
    let u = [0, 1];
    let v = [5, 6, 7, 8];
    let mut it = u.iter().map(|x| &v[*x..v.len()]).flatten();
    assert_eq!(it.next_back().unwrap(), &8);
    assert_eq!(it.next().unwrap(), &5);
    assert_eq!(it.next_back().unwrap(), &7);
    assert_eq!(it.next_back().unwrap(), &6);
    assert_eq!(it.next_back().unwrap(), &8);
    assert_eq!(it.next().unwrap(), &6);
    assert_eq!(it.next_back().unwrap(), &7);
    // Both ends exhausted; further polls from either side stay None.
    assert_eq!(it.next_back(), None);
    assert_eq!(it.next(), None);
    assert_eq!(it.next_back(), None);
}
+
#[test]
fn test_double_ended_range() {
    // Reversing a forward range yields its elements back-to-front.
    // (The original repeated these two checks verbatim; the duplicate
    // added no coverage and has been removed.)
    assert_eq!((11..14).rev().collect::<Vec<_>>(), [13, 12, 11]);
    // A backwards (empty) range stays empty when reversed; the loop body
    // must never run.
    for _ in (10..0).rev() {
        panic!("unreachable");
    }
}
+
#[test]
fn test_range() {
    assert_eq!((0..5).collect::<Vec<_>>(), [0, 1, 2, 3, 4]);
    assert_eq!((-10..-1).collect::<Vec<_>>(), [-10, -9, -8, -7, -6, -5, -4, -3, -2]);
    assert_eq!((0..5).rev().collect::<Vec<_>>(), [4, 3, 2, 1, 0]);
    // Backwards and empty ranges yield nothing, forwards or reversed.
    assert_eq!((200..-5).count(), 0);
    assert_eq!((200..-5).rev().count(), 0);
    assert_eq!((200..200).count(), 0);
    assert_eq!((200..200).rev().count(), 0);

    assert_eq!((0..100).size_hint(), (100, Some(100)));
    // this test is only meaningful when sizeof usize < sizeof u64
    assert_eq!((usize::MAX - 1..usize::MAX).size_hint(), (1, Some(1)));
    // Negative ranges hint their exact length; inverted ones hint zero.
    assert_eq!((-10..-1).size_hint(), (9, Some(9)));
    assert_eq!((-1..-10).size_hint(), (0, Some(0)));

    assert_eq!((-70..58).size_hint(), (128, Some(128)));
    assert_eq!((-128..127).size_hint(), (255, Some(255)));
    // end - start here overflows isize but still fits the usize hint.
    assert_eq!(
        (-2..isize::MAX).size_hint(),
        (isize::MAX as usize + 2, Some(isize::MAX as usize + 2))
    );
}
+
#[test]
fn test_char_range() {
    use std::char;
    // Miri is too slow
    let from = if cfg!(miri) { char::from_u32(0xD800 - 10).unwrap() } else { '\0' };
    let to = if cfg!(miri) { char::from_u32(0xDFFF + 10).unwrap() } else { char::MAX };
    // A char range must step over the surrogate gap exactly like filtering
    // the equivalent u32 range through from_u32, in both directions.
    assert!((from..=to).eq((from as u32..=to as u32).filter_map(char::from_u32)));
    assert!((from..=to).rev().eq((from as u32..=to as u32).filter_map(char::from_u32).rev()));

    // Counting and size hints also skip the surrogate code points:
    // D7FF..=E000 spans 0x802 code points but only 2 chars.
    assert_eq!(('\u{D7FF}'..='\u{E000}').count(), 2);
    assert_eq!(('\u{D7FF}'..='\u{E000}').size_hint(), (2, Some(2)));
    assert_eq!(('\u{D7FF}'..'\u{E000}').count(), 1);
    assert_eq!(('\u{D7FF}'..'\u{E000}').size_hint(), (1, Some(1)));
}
+
#[test]
fn test_range_exhaustion() {
    // An already-empty range yields nothing and keeps its bounds.
    let mut range = 10..10;
    assert!(range.is_empty());
    assert_eq!(range.next(), None);
    assert_eq!(range.next_back(), None);
    assert_eq!(range, 10..10);

    // Exhausting from the front moves start up to end.
    let mut range = 10..12;
    assert_eq!(range.next(), Some(10));
    assert_eq!(range.next(), Some(11));
    assert!(range.is_empty());
    assert_eq!(range, 12..12);
    assert_eq!(range.next(), None);

    // Exhausting from the back moves end down to start.
    let mut range = 10..12;
    assert_eq!(range.next_back(), Some(11));
    assert_eq!(range.next_back(), Some(10));
    assert!(range.is_empty());
    assert_eq!(range, 10..10);
    assert_eq!(range.next_back(), None);

    // A backwards range is empty but its bounds are left untouched.
    let mut range = 100..10;
    assert!(range.is_empty());
    assert_eq!(range.next(), None);
    assert_eq!(range.next_back(), None);
    assert_eq!(range, 100..10);
}
+
#[test]
fn test_range_inclusive_exhaustion() {
    // Yielding the final value flips the range into its exhausted state:
    // the stored bounds are unchanged, but it no longer compares equal
    // to a fresh copy of itself.
    let mut range = 10..=10;
    assert_eq!(range.next(), Some(10));
    assert!(range.is_empty());
    assert_eq!(range.next(), None);
    assert_eq!(range.next(), None);

    assert_eq!(*range.start(), 10);
    assert_eq!(*range.end(), 10);
    assert_ne!(range, 10..=10);

    // Same when exhausted from the back.
    let mut range = 10..=10;
    assert_eq!(range.next_back(), Some(10));
    assert!(range.is_empty());
    assert_eq!(range.next_back(), None);

    assert_eq!(*range.start(), 10);
    assert_eq!(*range.end(), 10);
    assert_ne!(range, 10..=10);

    // Forward exhaustion of a multi-element range.
    let mut range = 10..=12;
    assert_eq!(range.next(), Some(10));
    assert_eq!(range.next(), Some(11));
    assert_eq!(range.next(), Some(12));
    assert!(range.is_empty());
    assert_eq!(range.next(), None);

    // Backward exhaustion.
    let mut range = 10..=12;
    assert_eq!(range.next_back(), Some(12));
    assert_eq!(range.next_back(), Some(11));
    assert_eq!(range.next_back(), Some(10));
    assert!(range.is_empty());
    assert_eq!(range.next_back(), None);

    // nth landing exactly on the last element also exhausts the range...
    let mut range = 10..=12;
    assert_eq!(range.nth(2), Some(12));
    assert!(range.is_empty());
    assert_eq!(range.next(), None);

    // ...as does overshooting with nth.
    let mut range = 10..=12;
    assert_eq!(range.nth(5), None);
    assert!(range.is_empty());
    assert_eq!(range.next(), None);

    // A backwards inclusive range is born empty yet still compares equal
    // to an identical backwards range.
    let mut range = 100..=10;
    assert_eq!(range.next(), None);
    assert!(range.is_empty());
    assert_eq!(range.next(), None);
    assert_eq!(range.next(), None);
    assert_eq!(range, 100..=10);

    let mut range = 100..=10;
    assert_eq!(range.next_back(), None);
    assert!(range.is_empty());
    assert_eq!(range.next_back(), None);
    assert_eq!(range.next_back(), None);
    assert_eq!(range, 100..=10);
}
+
#[test]
fn test_range_nth() {
    // `nth` is zero-indexed and returns None past the end.
    assert_eq!((10..15).nth(0), Some(10));
    assert_eq!((10..15).nth(1), Some(11));
    assert_eq!((10..15).nth(4), Some(14));
    assert_eq!((10..15).nth(5), None);

    // `nth` advances the range past the returned element.
    let mut range = 10..20;
    assert_eq!(range.nth(2), Some(12));
    assert_eq!(range, 13..20);
    assert_eq!(range.nth(2), Some(15));
    assert_eq!(range, 16..20);
    // Overshooting exhausts the range in place.
    assert_eq!(range.nth(10), None);
    assert_eq!(range, 20..20);
}
+
#[test]
fn test_range_nth_back() {
    // `nth_back` counts from the rear and returns None when overshooting.
    assert_eq!((10..15).nth_back(0), Some(14));
    assert_eq!((10..15).nth_back(1), Some(13));
    assert_eq!((10..15).nth_back(4), Some(10));
    assert_eq!((10..15).nth_back(5), None);
    // The full i8 span (200 elements) can be walked back to the first element.
    assert_eq!((-120..80_i8).nth_back(199), Some(-120));

    // `nth_back` shrinks the range from the `end` side.
    let mut range = 10..20;
    assert_eq!(range.nth_back(2), Some(17));
    assert_eq!(range, 10..17);
    assert_eq!(range.nth_back(2), Some(14));
    assert_eq!(range, 10..14);
    // Overshooting exhausts the range in place.
    assert_eq!(range.nth_back(10), None);
    assert_eq!(range, 10..10);
}
+
#[test]
fn test_range_from_nth() {
    assert_eq!((10..).nth(0), Some(10));
    assert_eq!((10..).nth(1), Some(11));
    assert_eq!((10..).nth(4), Some(14));

    // RangeFrom never exhausts: every `nth` succeeds and advances the start.
    let mut range = 10..;
    assert_eq!(range.nth(2), Some(12));
    assert_eq!(range, 13..);
    assert_eq!(range.nth(2), Some(15));
    assert_eq!(range, 16..);
    assert_eq!(range.nth(10), Some(26));
    assert_eq!(range, 27..);

    // Unbounded iterator: saturated lower bound, unknown upper bound.
    assert_eq!((0..).size_hint(), (usize::MAX, None));
}
+
// Compile-time helper: accepts only iterators implementing `TrustedLen`
// (the unstable marker trait guaranteeing an exact `size_hint`).
fn is_trusted_len<I: TrustedLen>(_: I) {}
+
#[test]
fn test_range_from_take() {
    // `take` turns the infinite RangeFrom into a well-behaved finite iterator.
    let mut it = (0..).take(3);
    assert_eq!(it.next(), Some(0));
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    assert_eq!(it.next(), None);
    // Take<RangeFrom> promises an exact length via TrustedLen.
    is_trusted_len((0..).take(3));
    assert_eq!((0..).take(3).size_hint(), (3, Some(3)));
    assert_eq!((0..).take(0).size_hint(), (0, Some(0)));
    assert_eq!((0..).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
}
+
#[test]
fn test_range_from_take_collect() {
    // Collecting a `take` of an infinite range yields exactly the first n items.
    let collected: Vec<_> = (0..).take(3).collect();
    assert_eq!(collected, [0, 1, 2]);
}
+
#[test]
fn test_range_inclusive_nth() {
    // `nth(len - 1)` yields the inclusive endpoint; one past is None.
    assert_eq!((10..=15).nth(0), Some(10));
    assert_eq!((10..=15).nth(1), Some(11));
    assert_eq!((10..=15).nth(5), Some(15));
    assert_eq!((10..=15).nth(6), None);

    // Build an exhausted range the ordinary way so we can compare exhaustion
    // states below (an exhausted range equals only another exhausted one).
    let mut exhausted_via_next = 10_u8..=20;
    while exhausted_via_next.next().is_some() {}

    let mut r = 10_u8..=20;
    assert_eq!(r.nth(2), Some(12));
    assert_eq!(r, 13..=20);
    assert_eq!(r.nth(2), Some(15));
    assert_eq!(r, 16..=20);
    assert_eq!(r.is_empty(), false);
    assert_eq!(ExactSizeIterator::is_empty(&r), false);
    // Overshooting with `nth` must leave the same exhausted state as
    // iterating to completion with `next`.
    assert_eq!(r.nth(10), None);
    assert_eq!(r.is_empty(), true);
    assert_eq!(r, exhausted_via_next);
    assert_eq!(ExactSizeIterator::is_empty(&r), true);
}
+
#[test]
fn test_range_inclusive_nth_back() {
    // `nth_back` counts from the inclusive endpoint downwards.
    assert_eq!((10..=15).nth_back(0), Some(15));
    assert_eq!((10..=15).nth_back(1), Some(14));
    assert_eq!((10..=15).nth_back(5), Some(10));
    assert_eq!((10..=15).nth_back(6), None);
    // The full inclusive i8 span holds 201 elements (indices 0..=200).
    assert_eq!((-120..=80_i8).nth_back(200), Some(-120));

    // Build an exhausted range via `next_back` to compare exhaustion states.
    let mut exhausted_via_next_back = 10_u8..=20;
    while exhausted_via_next_back.next_back().is_some() {}

    let mut r = 10_u8..=20;
    assert_eq!(r.nth_back(2), Some(18));
    assert_eq!(r, 10..=17);
    assert_eq!(r.nth_back(2), Some(15));
    assert_eq!(r, 10..=14);
    assert_eq!(r.is_empty(), false);
    assert_eq!(ExactSizeIterator::is_empty(&r), false);
    // Overshooting with `nth_back` must leave the same exhausted state as
    // draining with `next_back`.
    assert_eq!(r.nth_back(10), None);
    assert_eq!(r.is_empty(), true);
    assert_eq!(r, exhausted_via_next_back);
    assert_eq!(ExactSizeIterator::is_empty(&r), true);
}
+
#[test]
fn test_range_len() {
    // `(start..end).len()` is `end - start`, saturating to zero for empty
    // or inverted ranges.
    for (start, end, expected) in [(0u8, 10, 10), (9, 10, 1), (10, 10, 0), (11, 10, 0), (100, 10, 0)]
    {
        assert_eq!((start..end).len(), expected);
    }
}

#[test]
fn test_range_inclusive_len() {
    // `(start..=end).len()` counts both endpoints; inverted ranges are empty.
    for (start, end, expected) in [(0u8, 10, 11), (9, 10, 2), (10, 10, 1), (11, 10, 0), (100, 10, 0)]
    {
        assert_eq!((start..=end).len(), expected);
    }
}
+
#[test]
fn test_range_step() {
    #![allow(deprecated)]

    // `step_by` over forward, reversed, and u8 ranges. In the u8 case,
    // 250 + 50 would overflow past 255, so iteration stops after 250.
    assert_eq!((0..20).step_by(5).collect::<Vec<isize>>(), [0, 5, 10, 15]);
    assert_eq!((1..21).rev().step_by(5).collect::<Vec<isize>>(), [20, 15, 10, 5]);
    assert_eq!((1..21).rev().step_by(6).collect::<Vec<isize>>(), [20, 14, 8, 2]);
    assert_eq!((200..255).step_by(50).collect::<Vec<u8>>(), [200, 250]);
    assert_eq!((200..-5).step_by(1).collect::<Vec<isize>>(), []);
    assert_eq!((200..200).step_by(1).collect::<Vec<isize>>(), []);

    // size_hint is ceil(len / step); boundary cases near the type limits
    // must not overflow the length computation.
    assert_eq!((0..20).step_by(1).size_hint(), (20, Some(20)));
    assert_eq!((0..20).step_by(21).size_hint(), (1, Some(1)));
    assert_eq!((0..20).step_by(5).size_hint(), (4, Some(4)));
    assert_eq!((1..21).rev().step_by(5).size_hint(), (4, Some(4)));
    assert_eq!((1..21).rev().step_by(6).size_hint(), (4, Some(4)));
    assert_eq!((20..-5).step_by(1).size_hint(), (0, Some(0)));
    assert_eq!((20..20).step_by(1).size_hint(), (0, Some(0)));
    assert_eq!((i8::MIN..i8::MAX).step_by(-(i8::MIN as i32) as usize).size_hint(), (2, Some(2)));
    assert_eq!((i16::MIN..i16::MAX).step_by(i16::MAX as usize).size_hint(), (3, Some(3)));
    assert_eq!((isize::MIN..isize::MAX).step_by(1).size_hint(), (usize::MAX, Some(usize::MAX)));
}
+
#[test]
fn test_step_by_skip() {
    // `skip` after `step_by` drops whole steps, not raw elements.
    assert!((0..640).step_by(128).skip(1).eq([128, 256, 384, 512]));
    assert_eq!((0..=50).step_by(10).nth(3), Some(30));
    assert_eq!((200..=255u8).step_by(10).nth(3), Some(230));
}

#[test]
fn test_range_inclusive_step() {
    // Stepping through an inclusive range includes the endpoint when it
    // falls on a step, without overflowing past the type maximum.
    assert!((0..=50).step_by(10).eq([0, 10, 20, 30, 40, 50]));
    assert!((0..=5).step_by(1).eq(0..=5));
    assert!((200..=255u8).step_by(10).eq([200, 210, 220, 230, 240, 250]));
    assert!((250..=255u8).step_by(1).eq(250..=255));
}
+
#[test]
fn test_range_last_max() {
    // For half-open ranges, `last` and `max` agree: the value just below
    // `end`, or None when the range is empty.
    for (range, expected) in [(0..20, Some(19)), (-20..0, Some(-1)), (5..5, None)] {
        assert_eq!(range.clone().last(), expected);
        assert_eq!(range.max(), expected);
    }
}

#[test]
fn test_range_inclusive_last_max() {
    // Inclusive ranges report the endpoint itself as both last and max.
    for (range, expected) in [(0..=20, Some(20)), (-20..=0, Some(0)), (5..=5, Some(5))] {
        assert_eq!(range.clone().last(), expected);
        assert_eq!(range.max(), expected);
    }
    // Once exhausted, an inclusive range has neither a last nor a max.
    let mut range = 10..=10;
    range.next();
    assert_eq!(range.clone().last(), None);
    assert_eq!(range.max(), None);
}

#[test]
fn test_range_min() {
    // `min` of a half-open range is its start, unless the range is empty.
    for (range, expected) in [(0..20, Some(0)), (-20..0, Some(-20)), (5..5, None)] {
        assert_eq!(range.min(), expected);
    }
}

#[test]
fn test_range_inclusive_min() {
    for (range, expected) in [(0..=20, Some(0)), (-20..=0, Some(-20)), (5..=5, Some(5))] {
        assert_eq!(range.min(), expected);
    }
    // An exhausted inclusive range has no minimum.
    let mut range = 10..=10;
    range.next();
    assert_eq!(range.min(), None);
}
+
#[test]
fn test_range_inclusive_folds() {
    assert_eq!((1..=10).sum::<i32>(), 55);
    assert_eq!((1..=10).rev().sum::<i32>(), 55);

    // try_fold with i8::checked_add overflows partway; each failing call must
    // consume exactly the elements it visited (44+45+46 > 127, so 44..=46 are
    // consumed and 47..=50 remain, and so on).
    let mut it = 44..=50;
    assert_eq!(it.try_fold(0, i8::checked_add), None);
    assert_eq!(it, 47..=50);
    assert_eq!(it.try_fold(0, i8::checked_add), None);
    assert_eq!(it, 50..=50);
    assert_eq!(it.try_fold(0, i8::checked_add), Some(50));
    assert!(it.is_empty());
    // Folding an exhausted range succeeds trivially with the seed.
    assert_eq!(it.try_fold(0, i8::checked_add), Some(0));
    assert!(it.is_empty());

    // Same partial-consumption contract for try_rfold, from the back.
    let mut it = 40..=47;
    assert_eq!(it.try_rfold(0, i8::checked_add), None);
    assert_eq!(it, 40..=44);
    assert_eq!(it.try_rfold(0, i8::checked_add), None);
    assert_eq!(it, 40..=41);
    assert_eq!(it.try_rfold(0, i8::checked_add), Some(81));
    assert!(it.is_empty());
    assert_eq!(it.try_rfold(0, i8::checked_add), Some(0));
    assert!(it.is_empty());

    // A successful try_fold exhausts the range completely.
    let mut it = 10..=20;
    assert_eq!(it.try_fold(0, |a, b| Some(a + b)), Some(165));
    assert!(it.is_empty());
    assert_eq!(it.try_fold(0, |a, b| Some(a + b)), Some(0));
    assert!(it.is_empty());

    let mut it = 10..=20;
    assert_eq!(it.try_rfold(0, |a, b| Some(a + b)), Some(165));
    assert!(it.is_empty());
    assert_eq!(it.try_rfold(0, |a, b| Some(a + b)), Some(0));
    assert!(it.is_empty());
}
+
#[test]
fn test_range_size_hint() {
    // usize ranges: the hint is exact up to usize::MAX elements.
    assert_eq!((0..0usize).size_hint(), (0, Some(0)));
    assert_eq!((0..100usize).size_hint(), (100, Some(100)));
    assert_eq!((0..usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));

    // u128 ranges: once the length exceeds usize::MAX, the lower bound
    // saturates and the upper bound is dropped.
    let usize_max = u128::try_from(usize::MAX).unwrap();
    assert_eq!((0..0u128).size_hint(), (0, Some(0)));
    assert_eq!((0..100u128).size_hint(), (100, Some(100)));
    assert_eq!((0..usize_max).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((0..usize_max + 1).size_hint(), (usize::MAX, None));

    // Signed ranges are measured by their span, not the raw endpoints.
    assert_eq!((0..0isize).size_hint(), (0, Some(0)));
    assert_eq!((-100..100isize).size_hint(), (200, Some(200)));
    assert_eq!((isize::MIN..isize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));

    let isize_min = i128::try_from(isize::MIN).unwrap();
    let isize_max = i128::try_from(isize::MAX).unwrap();
    assert_eq!((0..0i128).size_hint(), (0, Some(0)));
    assert_eq!((-100..100i128).size_hint(), (200, Some(200)));
    assert_eq!((isize_min..isize_max).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((isize_min..isize_max + 1).size_hint(), (usize::MAX, None));
}
+
#[test]
fn test_range_inclusive_size_hint() {
    // usize: `0..=usize::MAX` has usize::MAX + 1 elements, which no longer
    // fits in a usize, so the upper bound is dropped.
    assert_eq!((1..=0usize).size_hint(), (0, Some(0)));
    assert_eq!((0..=0usize).size_hint(), (1, Some(1)));
    assert_eq!((0..=100usize).size_hint(), (101, Some(101)));
    assert_eq!((0..=usize::MAX - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((0..=usize::MAX).size_hint(), (usize::MAX, None));

    // u128: same behavior once the element count exceeds usize::MAX.
    let usize_max = u128::try_from(usize::MAX).unwrap();
    assert_eq!((1..=0u128).size_hint(), (0, Some(0)));
    assert_eq!((0..=0u128).size_hint(), (1, Some(1)));
    assert_eq!((0..=100u128).size_hint(), (101, Some(101)));
    assert_eq!((0..=usize_max - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((0..=usize_max).size_hint(), (usize::MAX, None));
    assert_eq!((0..=usize_max + 1).size_hint(), (usize::MAX, None));

    // Signed ranges, measured by span including both endpoints.
    assert_eq!((0..=-1isize).size_hint(), (0, Some(0)));
    assert_eq!((0..=0isize).size_hint(), (1, Some(1)));
    assert_eq!((-100..=100isize).size_hint(), (201, Some(201)));
    assert_eq!((isize::MIN..=isize::MAX - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((isize::MIN..=isize::MAX).size_hint(), (usize::MAX, None));

    let isize_min = i128::try_from(isize::MIN).unwrap();
    let isize_max = i128::try_from(isize::MAX).unwrap();
    assert_eq!((0..=-1i128).size_hint(), (0, Some(0)));
    assert_eq!((0..=0i128).size_hint(), (1, Some(1)));
    assert_eq!((-100..=100i128).size_hint(), (201, Some(201)));
    assert_eq!((isize_min..=isize_max - 1).size_hint(), (usize::MAX, Some(usize::MAX)));
    assert_eq!((isize_min..=isize_max).size_hint(), (usize::MAX, None));
    assert_eq!((isize_min..=isize_max + 1).size_hint(), (usize::MAX, None));
}
+
#[test]
fn test_repeat() {
    // `repeat` yields its value forever; spot-check a few pulls.
    let mut iter = repeat(42);
    for _ in 0..3 {
        assert_eq!(iter.next(), Some(42));
    }
    // Infinite iterator: saturated lower bound, no upper bound.
    assert_eq!(repeat(42).size_hint(), (usize::MAX, None));
}
+
#[test]
fn test_repeat_take() {
    // `take` bounds the infinite repeater.
    let mut it = repeat(42).take(3);
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), None);
    // Take<Repeat> promises an exact length via TrustedLen.
    is_trusted_len(repeat(42).take(3));
    assert_eq!(repeat(42).take(3).size_hint(), (3, Some(3)));
    assert_eq!(repeat(42).take(0).size_hint(), (0, Some(0)));
    assert_eq!(repeat(42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
}
+
#[test]
fn test_repeat_take_collect() {
    // Collecting a bounded repeater yields exactly `n` copies.
    let collected: Vec<_> = repeat(42).take(3).collect();
    assert_eq!(collected, [42, 42, 42]);
}
+
#[test]
fn test_repeat_with() {
    // `repeat_with` works with non-Clone values because it re-runs the
    // closure for each element instead of cloning one prototype.
    #[derive(PartialEq, Debug)]
    struct NotClone(usize);
    let mut iter = repeat_with(|| NotClone(42));
    for _ in 0..3 {
        assert_eq!(iter.next(), Some(NotClone(42)));
    }
    assert_eq!(repeat_with(|| NotClone(42)).size_hint(), (usize::MAX, None));
}
+
#[test]
fn test_repeat_with_take() {
    // `take` bounds the infinite closure-based repeater.
    let mut it = repeat_with(|| 42).take(3);
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), None);
    // Take<RepeatWith> promises an exact length via TrustedLen.
    is_trusted_len(repeat_with(|| 42).take(3));
    assert_eq!(repeat_with(|| 42).take(3).size_hint(), (3, Some(3)));
    assert_eq!(repeat_with(|| 42).take(0).size_hint(), (0, Some(0)));
    assert_eq!(repeat_with(|| 42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX)));
}
+
#[test]
fn test_repeat_with_take_collect() {
    // The closure may carry mutable state: here, successive powers of two.
    let mut next_value = 1;
    let collected: Vec<_> = repeat_with(|| {
        let current = next_value;
        next_value *= 2;
        current
    })
    .take(5)
    .collect();
    assert_eq!(collected, [1, 2, 4, 8, 16]);
}
+
#[test]
fn test_successors() {
    // Stops at the first None from the closure (100_000 overflows u16).
    let mut powers = successors(Some(1_u16), |&n| n.checked_mul(10));
    assert_eq!(powers.by_ref().collect::<Vec<_>>(), [1, 10, 100, 1_000, 10_000]);
    assert_eq!(powers.next(), None);

    // With a None seed the closure must never be consulted.
    let mut nothing = successors(None::<u32>, |_| unimplemented!());
    assert_eq!(nothing.next(), None);
    assert_eq!(nothing.next(), None);
}
+
#[test]
fn test_fuse() {
    let mut it = 0..3;
    // `len` tracks the remaining element count as items are pulled.
    for expected in 0..3 {
        assert_eq!(it.len(), 3 - expected);
        assert_eq!(it.next(), Some(expected));
    }
    // Once exhausted, `next` keeps returning None and `len` stays zero.
    for _ in 0..3 {
        assert_eq!(it.len(), 0);
        assert_eq!(it.next(), None);
    }
    assert_eq!(it.len(), 0);
}
+
#[test]
fn test_fuse_nth() {
    // `nth` to the final element exhausts the iterator; further `nth` calls
    // return None and leave the length at zero.
    let values = [0, 1, 2];
    let mut iter = values.iter();

    assert_eq!(iter.len(), 3);
    assert_eq!(iter.nth(2), Some(&2));
    assert_eq!(iter.len(), 0);
    assert_eq!(iter.nth(2), None);
    assert_eq!(iter.len(), 0);
}
+
#[test]
fn test_fuse_last() {
    // `last` consumes the iterator and returns the final element.
    let values = [0, 1, 2];
    let iter = values.iter();

    assert_eq!(iter.len(), 3);
    assert_eq!(iter.last(), Some(&2));
}

#[test]
fn test_fuse_count() {
    let values = [0, 1, 2];
    let iter = values.iter();

    assert_eq!(iter.len(), 3);
    // `count` consumes the iterator, so no further `len` check is possible.
    assert_eq!(iter.count(), 3);
}
+
#[test]
fn test_fuse_fold() {
    let values = [0, 1, 2];

    // fold through Fuse over a `FusedIterator` source visits all elements
    // in order (the accumulator doubles as a position counter).
    let seen = values.iter().fuse().fold(0, |idx, &val| {
        assert_eq!(val, values[idx]);
        idx + 1
    });
    assert_eq!(seen, values.len());

    // rfold walks from the back.
    let seen = values.iter().fuse().rfold(values.len(), |idx, &val| {
        assert_eq!(val, values[idx - 1]);
        idx - 1
    });
    assert_eq!(seen, 0);

    // Also exercise the non-`FusedIterator` code path (`scan` defeats the
    // marker trait).
    let unfused = values.iter().scan((), |_, &val| Some(val));
    let seen = unfused.fuse().fold(0, |idx, val| {
        assert_eq!(val, values[idx]);
        idx + 1
    });
    assert_eq!(seen, values.len());
}
+
#[test]
fn test_once() {
    // `once` yields its value exactly one time.
    let mut iter = once(42);
    assert_eq!(iter.next(), Some(42));
    assert_eq!(iter.next(), None);
}

#[test]
fn test_once_with() {
    // The closure must run exactly once, on the first `next` call only.
    let calls = Cell::new(0);
    let mut iter = once_with(|| {
        calls.set(calls.get() + 1);
        42
    });

    assert_eq!(calls.get(), 0);
    assert_eq!(iter.next(), Some(42));
    assert_eq!(calls.get(), 1);
    assert_eq!(iter.next(), None);
    assert_eq!(calls.get(), 1);
    assert_eq!(iter.next(), None);
    assert_eq!(calls.get(), 1);
}

#[test]
fn test_empty() {
    // `empty` yields nothing at all.
    let mut iter = empty::<i32>();
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_chain_fold() {
    let first = [1, 2, 3];
    let second = [1, 2, 0];

    // `fold` on a partially-advanced chain must visit the remainder of the
    // front half and then the whole back half.
    let mut iter = first.iter().chain(&second);
    iter.next();
    let mut seen = Vec::new();
    iter.fold((), |(), &elt| seen.push(elt));
    assert_eq!(&[2, 3, 1, 2, 0], &seen[..]);
}
+
#[test]
fn test_steps_between() {
    // `Step::steps_between` counts elements between two bounds as a usize;
    // it is None when the count does not fit in a usize.
    assert_eq!(Step::steps_between(&20_u8, &200_u8), Some(180_usize));
    assert_eq!(Step::steps_between(&-20_i8, &80_i8), Some(100_usize));
    assert_eq!(Step::steps_between(&-120_i8, &80_i8), Some(200_usize));
    assert_eq!(Step::steps_between(&20_u32, &4_000_100_u32), Some(4_000_080_usize));
    assert_eq!(Step::steps_between(&-20_i32, &80_i32), Some(100_usize));
    assert_eq!(Step::steps_between(&-2_000_030_i32, &2_000_050_i32), Some(4_000_080_usize));

    // Skip u64/i64 to avoid differences with 32-bit vs 64-bit platforms

    assert_eq!(Step::steps_between(&20_u128, &200_u128), Some(180_usize));
    assert_eq!(Step::steps_between(&-20_i128, &80_i128), Some(100_usize));
    // Exactly usize::MAX steps is still representable (64-bit only)...
    if cfg!(target_pointer_width = "64") {
        assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_0009_u128), Some(usize::MAX));
    }
    // ...but one more overflows the usize count.
    assert_eq!(Step::steps_between(&10_u128, &0x1_0000_0000_0000_000a_u128), None);
    assert_eq!(Step::steps_between(&10_i128, &0x1_0000_0000_0000_000a_i128), None);
    assert_eq!(
        Step::steps_between(&-0x1_0000_0000_0000_0000_i128, &0x1_0000_0000_0000_0000_i128,),
        None,
    );
}
+
#[test]
fn test_step_forward() {
    // `Step::forward_checked` advances by a usize count, returning None when
    // the result would exceed the type's maximum.
    assert_eq!(Step::forward_checked(55_u8, 200_usize), Some(255_u8));
    assert_eq!(Step::forward_checked(252_u8, 200_usize), None);
    // A count larger than the whole domain can never succeed.
    assert_eq!(Step::forward_checked(0_u8, 256_usize), None);
    assert_eq!(Step::forward_checked(-110_i8, 200_usize), Some(90_i8));
    assert_eq!(Step::forward_checked(-110_i8, 248_usize), None);
    assert_eq!(Step::forward_checked(-126_i8, 256_usize), None);

    assert_eq!(Step::forward_checked(35_u16, 100_usize), Some(135_u16));
    // Landing exactly on the type maximum is allowed.
    assert_eq!(Step::forward_checked(35_u16, 65500_usize), Some(u16::MAX));
    assert_eq!(Step::forward_checked(36_u16, 65500_usize), None);
    assert_eq!(Step::forward_checked(-110_i16, 200_usize), Some(90_i16));
    assert_eq!(Step::forward_checked(-20_030_i16, 50_050_usize), Some(30_020_i16));
    assert_eq!(Step::forward_checked(-10_i16, 40_000_usize), None);
    assert_eq!(Step::forward_checked(-10_i16, 70_000_usize), None);

    // 128-bit types: the step count (usize) is narrower than the domain.
    assert_eq!(Step::forward_checked(10_u128, 70_000_usize), Some(70_010_u128));
    assert_eq!(Step::forward_checked(10_i128, 70_030_usize), Some(70_040_i128));
    assert_eq!(
        Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0xff_usize),
        Some(u128::MAX),
    );
    assert_eq!(
        Step::forward_checked(0xffff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_u128, 0x100_usize),
        None
    );
    assert_eq!(
        Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0xff_usize),
        Some(i128::MAX),
    );
    assert_eq!(
        Step::forward_checked(0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
        None
    );
}
+
#[test]
fn test_step_backward() {
    // `Step::backward_checked` is the mirror of forward_checked: None when
    // stepping below the type's minimum.
    assert_eq!(Step::backward_checked(255_u8, 200_usize), Some(55_u8));
    assert_eq!(Step::backward_checked(100_u8, 200_usize), None);
    // A count larger than the whole domain can never succeed.
    assert_eq!(Step::backward_checked(255_u8, 256_usize), None);
    assert_eq!(Step::backward_checked(90_i8, 200_usize), Some(-110_i8));
    assert_eq!(Step::backward_checked(110_i8, 248_usize), None);
    assert_eq!(Step::backward_checked(127_i8, 256_usize), None);

    assert_eq!(Step::backward_checked(135_u16, 100_usize), Some(35_u16));
    assert_eq!(Step::backward_checked(u16::MAX, 65500_usize), Some(35_u16));
    assert_eq!(Step::backward_checked(10_u16, 11_usize), None);
    assert_eq!(Step::backward_checked(90_i16, 200_usize), Some(-110_i16));
    assert_eq!(Step::backward_checked(30_020_i16, 50_050_usize), Some(-20_030_i16));
    assert_eq!(Step::backward_checked(-10_i16, 40_000_usize), None);
    assert_eq!(Step::backward_checked(-10_i16, 70_000_usize), None);

    // 128-bit types, including landing exactly on i128::MIN.
    assert_eq!(Step::backward_checked(70_010_u128, 70_000_usize), Some(10_u128));
    assert_eq!(Step::backward_checked(70_020_i128, 70_030_usize), Some(-10_i128));
    assert_eq!(Step::backward_checked(10_u128, 7_usize), Some(3_u128));
    assert_eq!(Step::backward_checked(10_u128, 11_usize), None);
    assert_eq!(
        Step::backward_checked(-0x7fff_ffff_ffff_ffff__ffff_ffff_ffff_ff00_i128, 0x100_usize),
        Some(i128::MIN)
    );
}
+
#[test]
fn test_rev_try_folds() {
    // rev().try_fold must behave exactly like try_rfold, and vice versa.
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((1..10).rev().try_fold(7, f), (1..10).try_rfold(7, f));
    assert_eq!((1..10).rev().try_rfold(7, f), (1..10).try_fold(7, f));

    // When checked_add overflows partway, Rev must stop exactly there and
    // leave the remaining elements for subsequent calls.
    let a = [10, 20, 30, 40, 100, 60, 70, 80, 90];
    let mut iter = a.iter().rev();
    assert_eq!(iter.try_fold(0_i8, |acc, &x| acc.checked_add(x)), None);
    assert_eq!(iter.next(), Some(&70));
    let mut iter = a.iter().rev();
    assert_eq!(iter.try_rfold(0_i8, |acc, &x| acc.checked_add(x)), None);
    assert_eq!(iter.next_back(), Some(&60));
}
+
#[test]
fn test_cloned_try_folds() {
    // Cloned must fold identically to folding over references.
    let a = [1, 2, 3, 4, 5, 6, 7, 8, 9];
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    let f_ref = &|acc, &x| i32::checked_add(2 * acc, x);
    assert_eq!(a.iter().cloned().try_fold(7, f), a.iter().try_fold(7, f_ref));
    assert_eq!(a.iter().cloned().try_rfold(7, f), a.iter().try_rfold(7, f_ref));

    // On early exit (i8 overflow), the adaptor must stop in place, leaving
    // the next element unconsumed.
    let a = [10, 20, 30, 40, 100, 60, 70, 80, 90];
    let mut iter = a.iter().cloned();
    assert_eq!(iter.try_fold(0_i8, |acc, x| acc.checked_add(x)), None);
    assert_eq!(iter.next(), Some(60));
    let mut iter = a.iter().cloned();
    assert_eq!(iter.try_rfold(0_i8, |acc, x| acc.checked_add(x)), None);
    assert_eq!(iter.next_back(), Some(70));
}
+
#[test]
fn test_chain_try_folds() {
    let c = || (0..10).chain(10..20);

    // A chain must fold exactly like the concatenated range.
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!(c().try_fold(7, f), (0..20).try_fold(7, f));
    assert_eq!(c().try_rfold(7, f), (0..20).rev().try_fold(7, f));

    // Resume folding after the chain has stopped in each internal state.
    let mut iter = c();
    assert_eq!(iter.position(|x| x == 5), Some(5));
    assert_eq!(iter.next(), Some(6), "stopped in front, state Both");
    assert_eq!(iter.position(|x| x == 13), Some(6));
    assert_eq!(iter.next(), Some(14), "stopped in back, state Back");
    assert_eq!(iter.try_fold(0, |acc, x| Some(acc + x)), Some((15..20).sum()));

    let mut iter = c().rev(); // use rev to access try_rfold
    assert_eq!(iter.position(|x| x == 15), Some(4));
    assert_eq!(iter.next(), Some(14), "stopped in back, state Both");
    assert_eq!(iter.position(|x| x == 5), Some(8));
    assert_eq!(iter.next(), Some(4), "stopped in front, state Front");
    assert_eq!(iter.try_fold(0, |acc, x| Some(acc + x)), Some((0..4).sum()));

    // Folding when one half of the chain is already fully consumed.
    let mut iter = c();
    iter.by_ref().rev().nth(14); // skip the last 15, ending in state Front
    assert_eq!(iter.try_fold(7, f), (0..5).try_fold(7, f));

    let mut iter = c();
    iter.nth(14); // skip the first 15, ending in state Back
    assert_eq!(iter.try_rfold(7, f), (15..20).try_rfold(7, f));
}
+
#[test]
fn test_map_try_folds() {
    // Map must fold like the pre-mapped equivalent range.
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((0..10).map(|x| x + 3).try_fold(7, f), (3..13).try_fold(7, f));
    assert_eq!((0..10).map(|x| x + 3).try_rfold(7, f), (3..13).try_rfold(7, f));

    // On early exit (i8 overflow), the next element must still be available.
    let mut iter = (0..40).map(|x| x + 10);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(20));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(46));
}
+
#[test]
fn test_filter_try_folds() {
    // Keep only 0..10 out of -10..20; folding must match the plain range.
    fn p(&x: &i32) -> bool {
        0 <= x && x < 10
    }
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((-10..20).filter(p).try_fold(7, f), (0..10).try_fold(7, f));
    assert_eq!((-10..20).filter(p).try_rfold(7, f), (0..10).try_rfold(7, f));

    // On early exit (i8 overflow), the next surviving element is preserved.
    let mut iter = (0..40).filter(|&x| x % 2 == 1);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(25));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(31));
}
+
#[test]
fn test_filter_map_try_folds() {
    // Keep-and-double 0..10 out of -9..20; folding must match map on 0..10.
    let mp = &|x| if 0 <= x && x < 10 { Some(x * 2) } else { None };
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((-9..20).filter_map(mp).try_fold(7, f), (0..10).map(|x| 2 * x).try_fold(7, f));
    assert_eq!((-9..20).filter_map(mp).try_rfold(7, f), (0..10).map(|x| 2 * x).try_rfold(7, f));

    // On early exit (i8 overflow), the next surviving element is preserved.
    let mut iter = (0..40).filter_map(|x| if x % 2 == 1 { None } else { Some(x * 2 + 10) });
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(38));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(78));
}
+
#[test]
fn test_enumerate_try_folds() {
    // Enumerate must fold like explicit (index, value) pairs.
    let f = &|acc, (i, x)| usize::checked_add(2 * acc, x / (i + 1) + i);
    assert_eq!((9..18).enumerate().try_fold(7, f), (0..9).map(|i| (i, i + 9)).try_fold(7, f));
    assert_eq!((9..18).enumerate().try_rfold(7, f), (0..9).map(|i| (i, i + 9)).try_rfold(7, f));

    // On early exit (u8 overflow), the next (index, value) pair must still
    // carry the correct index.
    let mut iter = (100..200).enumerate();
    let f = &|acc, (i, x)| u8::checked_add(acc, u8::checked_div(x, i as u8 + 1)?);
    assert_eq!(iter.try_fold(0, f), None);
    assert_eq!(iter.next(), Some((7, 107)));
    assert_eq!(iter.try_rfold(0, f), None);
    assert_eq!(iter.next_back(), Some((11, 111)));
}
+
#[test]
fn test_peek_try_folds() {
    let f = &|acc, x| i32::checked_add(2 * acc, x);

    // Peekable must fold like the underlying iterator...
    assert_eq!((1..20).peekable().try_fold(7, f), (1..20).try_fold(7, f));
    assert_eq!((1..20).peekable().try_rfold(7, f), (1..20).try_rfold(7, f));

    // ...including a value that has already been peeked (it must be fed into
    // the fold first).
    let mut iter = (1..20).peekable();
    assert_eq!(iter.peek(), Some(&1));
    assert_eq!(iter.try_fold(7, f), (1..20).try_fold(7, f));

    let mut iter = (1..20).peekable();
    assert_eq!(iter.peek(), Some(&1));
    assert_eq!(iter.try_rfold(7, f), (1..20).try_rfold(7, f));

    // On early exit (i8 overflow), the peek slot must show the next
    // unconsumed element.
    let mut iter = [100, 20, 30, 40, 50, 60, 70].iter().cloned().peekable();
    assert_eq!(iter.peek(), Some(&100));
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.peek(), Some(&40));

    // try_rfold folds from the back, so the peeked front element survives.
    let mut iter = [100, 20, 30, 40, 50, 60, 70].iter().cloned().peekable();
    assert_eq!(iter.peek(), Some(&100));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.peek(), Some(&100));
    assert_eq!(iter.next_back(), Some(50));

    // Err short-circuits immediately; each call consumes exactly one element.
    let mut iter = (2..5).peekable();
    assert_eq!(iter.peek(), Some(&2));
    assert_eq!(iter.try_for_each(Err), Err(2));
    assert_eq!(iter.peek(), Some(&3));
    assert_eq!(iter.try_for_each(Err), Err(3));
    assert_eq!(iter.peek(), Some(&4));
    assert_eq!(iter.try_for_each(Err), Err(4));
    assert_eq!(iter.peek(), None);
    assert_eq!(iter.try_for_each(Err), Ok(()));

    // Same from the back: the front peek is unaffected until exhaustion.
    let mut iter = (2..5).peekable();
    assert_eq!(iter.peek(), Some(&2));
    assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(4));
    assert_eq!(iter.peek(), Some(&2));
    assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(3));
    assert_eq!(iter.peek(), Some(&2));
    assert_eq!(iter.try_rfold((), |(), x| Err(x)), Err(2));
    assert_eq!(iter.peek(), None);
    assert_eq!(iter.try_rfold((), |(), x| Err(x)), Ok(()));
}
+
#[test]
fn test_skip_while_try_fold() {
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    // Skip values whose last digit is <= 5; skipping stops permanently at 6.
    fn p(&x: &i32) -> bool {
        (x % 10) <= 5
    }
    assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f));
    // Once skipping has finished, the predicate is never consulted again.
    let mut iter = (1..20).skip_while(p);
    assert_eq!(iter.nth(5), Some(11));
    assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f));

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (0..50).skip_while(|&x| (x % 20) < 15);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(23));
}
+
#[test]
fn test_take_while_folds() {
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((1..20).take_while(|&x| x != 10).try_fold(7, f), (1..10).try_fold(7, f));
    // A completed try_fold must set the internal done-flag so that further
    // `next` calls return None.
    let mut iter = (1..20).take_while(|&x| x != 10);
    assert_eq!(iter.try_fold(0, |x, y| Some(x + y)), Some((1..10).sum()));
    assert_eq!(iter.next(), None, "flag should be set");
    let iter = (1..20).take_while(|&x| x != 10);
    assert_eq!(iter.fold(0, |x, y| x + y), (1..10).sum());

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (10..50).take_while(|&x| x != 40);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(20));
}
+
#[test]
fn test_skip_try_folds() {
    // Skip must fold like the truncated range, front and back.
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((1..20).skip(9).try_fold(7, f), (10..20).try_fold(7, f));
    assert_eq!((1..20).skip(9).try_rfold(7, f), (10..20).try_rfold(7, f));

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (0..30).skip(10);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(20));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(24));
}
+
#[test]
fn test_skip_nth_back() {
    // nth_back on Skip must never yield one of the skipped front elements.
    let xs = [0, 1, 2, 3, 4, 5];
    let mut it = xs.iter().skip(2);
    assert_eq!(it.nth_back(0), Some(&5));
    assert_eq!(it.nth_back(1), Some(&3));
    assert_eq!(it.nth_back(0), Some(&2));
    assert_eq!(it.nth_back(0), None);

    // A skipped iterator must behave exactly like an iterator over the
    // equivalent shorter slice, alternating nth_back and front reads.
    let ys = [2, 3, 4, 5];
    let mut ity = ys.iter();
    let mut it = xs.iter().skip(2);
    assert_eq!(it.nth_back(1), ity.nth_back(1));
    assert_eq!(it.clone().nth(0), ity.clone().nth(0));
    assert_eq!(it.nth_back(0), ity.nth_back(0));
    assert_eq!(it.clone().nth(0), ity.clone().nth(0));
    assert_eq!(it.nth_back(0), ity.nth_back(0));
    assert_eq!(it.clone().nth(0), ity.clone().nth(0));
    assert_eq!(it.nth_back(0), ity.nth_back(0));
    assert_eq!(it.clone().nth(0), ity.clone().nth(0));

    let mut it = xs.iter().skip(2);
    assert_eq!(it.nth_back(4), None);
    assert_eq!(it.nth_back(0), None);

    // Overshooting through a by_ref Skip must not consume the elements that
    // were only "skipped", so the underlying iterator still sees them.
    let mut it = xs.iter();
    it.by_ref().skip(2).nth_back(3);
    assert_eq!(it.next_back(), Some(&1));

    let mut it = xs.iter();
    it.by_ref().skip(2).nth_back(10);
    assert_eq!(it.next_back(), Some(&1));
}
+
#[test]
fn test_take_try_folds() {
    // Take must fold like the truncated range, front and back.
    let f = &|acc, x| i32::checked_add(2 * acc, x);
    assert_eq!((10..30).take(10).try_fold(7, f), (10..20).try_fold(7, f));
    assert_eq!((10..30).take(10).try_rfold(7, f), (10..20).try_rfold(7, f));

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (10..30).take(20);
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(20));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(24));

    // Err short-circuits: one element consumed per call until the cap.
    let mut iter = (2..20).take(3);
    assert_eq!(iter.try_for_each(Err), Err(2));
    assert_eq!(iter.try_for_each(Err), Err(3));
    assert_eq!(iter.try_for_each(Err), Err(4));
    assert_eq!(iter.try_for_each(Err), Ok(()));

    // Same capped window, consumed from the back.
    let mut iter = (2..20).take(3).rev();
    assert_eq!(iter.try_for_each(Err), Err(4));
    assert_eq!(iter.try_for_each(Err), Err(3));
    assert_eq!(iter.try_for_each(Err), Err(2));
    assert_eq!(iter.try_for_each(Err), Ok(()));
}
+
#[test]
fn test_flat_map_try_folds() {
    // Each x expands to 5x..5x+5, so the whole thing folds like 0..50.
    let f = &|acc, x| i32::checked_add(acc * 2 / 3, x);
    let mr = &|x| (5 * x)..(5 * x + 5);
    assert_eq!((0..10).flat_map(mr).try_fold(7, f), (0..50).try_fold(7, f));
    assert_eq!((0..10).flat_map(mr).try_rfold(7, f), (0..50).try_rfold(7, f));
    // With both the front and back inner iterators partially consumed,
    // folding must cover exactly the remaining middle.
    let mut iter = (0..10).flat_map(mr);
    iter.next();
    iter.next_back(); // have front and back iters in progress
    assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f));

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (0..10).flat_map(|x| (4 * x)..(4 * x + 4));
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(17));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(35));
}
+
#[test]
fn test_flatten_try_folds() {
    // Mirror of test_flat_map_try_folds, exercising map(..).flatten().
    let f = &|acc, x| i32::checked_add(acc * 2 / 3, x);
    let mr = &|x| (5 * x)..(5 * x + 5);
    assert_eq!((0..10).map(mr).flatten().try_fold(7, f), (0..50).try_fold(7, f));
    assert_eq!((0..10).map(mr).flatten().try_rfold(7, f), (0..50).try_rfold(7, f));
    // With both the front and back inner iterators partially consumed,
    // folding must cover exactly the remaining middle.
    let mut iter = (0..10).map(mr).flatten();
    iter.next();
    iter.next_back(); // have front and back iters in progress
    assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f));

    // On early exit (i8 overflow), the next element must remain available.
    let mut iter = (0..10).map(|x| (4 * x)..(4 * x + 4)).flatten();
    assert_eq!(iter.try_fold(0, i8::checked_add), None);
    assert_eq!(iter.next(), Some(17));
    assert_eq!(iter.try_rfold(0, i8::checked_add), None);
    assert_eq!(iter.next_back(), Some(35));
}
+
#[test]
fn test_functor_laws() {
    // Functor identity law: mapping the identity function changes nothing.
    fn identity<T>(value: T) -> T {
        value
    }
    let mapped: usize = (0..10).map(identity).sum();
    let plain: usize = (0..10).sum();
    assert_eq!(mapped, plain);

    // Functor composition law: map(f) then map(g) equals map(g ∘ f).
    fn f(x: usize) -> usize {
        x + 3
    }
    fn g(x: usize) -> usize {
        x * 2
    }
    fn h(x: usize) -> usize {
        g(f(x))
    }
    let chained: usize = (0..10).map(f).map(g).sum();
    let fused: usize = (0..10).map(h).sum();
    assert_eq!(chained, fused);
}
+
#[test]
fn test_monad_laws_left_identity() {
    // Left identity: `return a >>= f` is the same as `f a`, with `once`
    // playing the role of monadic return and `flat_map` of bind.
    fn f(x: usize) -> impl Iterator<Item = usize> {
        (0..10).map(move |y| x * y)
    }
    // Note: fn items are `Copy`, so no `.clone()` is needed to pass `f`.
    let bound: usize = once(42).flat_map(f).sum();
    let direct: usize = f(42).sum();
    assert_eq!(bound, direct);
}
+
#[test]
fn test_monad_laws_right_identity() {
    // Right identity: `m >>= return` is the same as `m`.
    let rebound: usize = (0..10).flat_map(once).sum();
    let original: usize = (0..10).sum();
    assert_eq!(rebound, original);
}
+
#[test]
fn test_monad_laws_associativity() {
    // Associativity: `(m >>= f) >>= g` equals `m >>= (\x -> f x >>= g)`.
    fn f(x: usize) -> impl Iterator<Item = usize> {
        0..x
    }
    fn g(x: usize) -> impl Iterator<Item = usize> {
        (0..x).rev()
    }
    let left_nested: usize = (0..10).flat_map(f).flat_map(g).sum();
    let right_nested: usize = (0..10).flat_map(|x| f(x).flat_map(g)).sum();
    assert_eq!(left_nested, right_nested);
}
+
#[test]
fn test_is_sorted() {
    // Non-strict ordering: equal adjacent elements still count as sorted.
    assert!([1, 2, 2, 9].iter().is_sorted());
    assert!(![1, 3, 2].iter().is_sorted());
    // Trivial cases: single element and empty iterator are always sorted.
    assert!([0].iter().is_sorted());
    assert!(std::iter::empty::<i32>().is_sorted());
    // NaN compares unordered to everything, so its presence breaks sortedness.
    assert!(![0.0, 1.0, f32::NAN].iter().is_sorted());
    assert!([-2, -1, 0, 3].iter().is_sorted());
    // Sorted by value but not by the derived key |n|.
    assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs()));
    // Lexicographically descending, but ascending by length.
    assert!(!["c", "bb", "aaa"].iter().is_sorted());
    assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
}
+
#[test]
fn test_partition() {
    // Partitions `xs` in place by `p`, then checks the split index and that
    // is_partitioned agrees (and disagrees for the reversed order, unless
    // the partition is degenerate — all-true or all-false).
    fn check(xs: &mut [i32], ref p: impl Fn(&i32) -> bool, expected: usize) {
        let i = xs.iter_mut().partition_in_place(p);
        assert_eq!(expected, i);
        assert!(xs[..i].iter().all(p));
        assert!(!xs[i..].iter().any(p));
        assert!(xs.iter().is_partitioned(p));
        if i == 0 || i == xs.len() {
            assert!(xs.iter().rev().is_partitioned(p));
        } else {
            assert!(!xs.iter().rev().is_partitioned(p));
        }
    }

    // Edge cases: empty slice and single element.
    check(&mut [], |_| true, 0);
    check(&mut [], |_| false, 0);

    check(&mut [0], |_| true, 1);
    check(&mut [0], |_| false, 0);

    check(&mut [-1, 1], |&x| x > 0, 1);
    check(&mut [-1, 1], |&x| x < 0, 1);

    // Repartitioning the same buffer with successive predicates is fine:
    // check only cares about the split point, not element order.
    let ref mut xs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
    check(xs, |_| true, 10);
    check(xs, |_| false, 0);
    check(xs, |&x| x % 2 == 0, 5); // evens
    check(xs, |&x| x % 2 == 1, 5); // odds
    check(xs, |&x| x % 3 == 0, 4); // multiple of 3
    check(xs, |&x| x % 4 == 0, 3); // multiple of 4
    check(xs, |&x| x % 5 == 0, 2); // multiple of 5
    check(xs, |&x| x < 3, 3); // small
    check(xs, |&x| x > 6, 3); // large
}
+
/// An iterator that panics whenever `next` or `next_back` is called
/// after `None` has already been returned. This does not violate
/// `Iterator`'s contract. Used to test that iterator adaptors don't
/// poll their inner iterators after exhausting them.
struct NonFused<I> {
    iter: I,
    // Set once the inner iterator has returned None; any further poll panics.
    done: bool,
}
+
impl<I> NonFused<I> {
    /// Wraps `iter` in a not-yet-exhausted `NonFused`.
    fn new(iter: I) -> Self {
        Self { iter, done: false }
    }
}
+
+impl<I> Iterator for NonFused<I>
+where
+ I: Iterator,
+{
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ assert!(!self.done, "this iterator has already returned None");
+ self.iter.next().or_else(|| {
+ self.done = true;
+ None
+ })
+ }
+}
+
+impl<I> DoubleEndedIterator for NonFused<I>
+where
+ I: DoubleEndedIterator,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ assert!(!self.done, "this iterator has already returned None");
+ self.iter.next_back().or_else(|| {
+ self.done = true;
+ None
+ })
+ }
+}
+
#[test]
fn test_peekable_non_fused() {
    // Peekable must not poll its inner iterator again after peek() has
    // already observed None (NonFused would panic if it did).
    let mut iter = NonFused::new(empty::<i32>()).peekable();

    assert_eq!(iter.peek(), None);
    assert_eq!(iter.next_back(), None);
}
+
#[test]
fn test_flatten_non_fused_outer() {
    // Flatten must not re-poll the exhausted *outer* iterator when draining
    // from both ends (NonFused would panic if it did).
    let mut iter = NonFused::new(once(0..2)).flatten();

    assert_eq!(iter.next_back(), Some(1));
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_flatten_non_fused_inner() {
    // Same as above but wrapping the *inner* iterators: flat_map must not
    // re-poll an inner iterator that already returned None.
    let mut iter = once(0..1).chain(once(1..3)).flat_map(NonFused::new);

    assert_eq!(iter.next_back(), Some(2));
    assert_eq!(iter.next(), Some(0));
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next(), None);
}
--- /dev/null
+use core::{
+ cell::Cell,
+ lazy::{Lazy, OnceCell},
+ sync::atomic::{AtomicUsize, Ordering::SeqCst},
+};
+
// Tests for the single-threaded `OnceCell` / `Lazy` (unstable `once_cell` feature).

#[test]
fn once_cell() {
    let c = OnceCell::new();
    assert!(c.get().is_none());
    c.get_or_init(|| 92);
    assert_eq!(c.get(), Some(&92));

    // The init closure of a full cell must never run, so this panic
    // is unreachable.
    c.get_or_init(|| panic!("Kabom!"));
    assert_eq!(c.get(), Some(&92));
}

#[test]
fn once_cell_get_mut() {
    let mut c = OnceCell::new();
    assert!(c.get_mut().is_none());
    c.set(90).unwrap();
    // Unique access allows mutating the stored value in place.
    *c.get_mut().unwrap() += 2;
    assert_eq!(c.get_mut(), Some(&mut 92));
}

#[test]
fn once_cell_drop() {
    // Counts drops to verify the cell drops its contents exactly once.
    static DROP_CNT: AtomicUsize = AtomicUsize::new(0);
    struct Dropper;
    impl Drop for Dropper {
        fn drop(&mut self) {
            DROP_CNT.fetch_add(1, SeqCst);
        }
    }

    let x = OnceCell::new();
    x.get_or_init(|| Dropper);
    assert_eq!(DROP_CNT.load(SeqCst), 0);
    drop(x);
    assert_eq!(DROP_CNT.load(SeqCst), 1);
}

#[test]
fn unsync_once_cell_drop_empty() {
    // Dropping a never-initialized cell must not touch its (uninitialized) slot.
    let x = OnceCell::<&'static str>::new();
    drop(x);
}

#[test]
fn clone() {
    // Clone of an empty cell is empty; clone of a full cell carries the value.
    let s = OnceCell::new();
    let c = s.clone();
    assert!(c.get().is_none());

    s.set("hello").unwrap();
    let c = s.clone();
    assert_eq!(c.get().map(|c| *c), Some("hello"));
}

#[test]
fn from_impl() {
    assert_eq!(OnceCell::from("value").get(), Some(&"value"));
    assert_ne!(OnceCell::from("foo").get(), Some(&"bar"));
}

#[test]
fn partialeq_impl() {
    // Equality compares the contained values; empty != full.
    assert!(OnceCell::from("value") == OnceCell::from("value"));
    assert!(OnceCell::from("foo") != OnceCell::from("bar"));

    assert!(OnceCell::<&'static str>::new() == OnceCell::new());
    assert!(OnceCell::<&'static str>::new() != OnceCell::from("value"));
}

#[test]
fn into_inner() {
    let cell: OnceCell<&'static str> = OnceCell::new();
    assert_eq!(cell.into_inner(), None);
    let cell = OnceCell::new();
    cell.set("hello").unwrap();
    assert_eq!(cell.into_inner(), Some("hello"));
}

#[test]
fn lazy_new() {
    // The init closure must run exactly once, on first deref, and never again.
    let called = Cell::new(0);
    let x = Lazy::new(|| {
        called.set(called.get() + 1);
        92
    });

    assert_eq!(called.get(), 0);

    let y = *x - 30;
    assert_eq!(y, 62);
    assert_eq!(called.get(), 1);

    let y = *x - 30;
    assert_eq!(y, 62);
    assert_eq!(called.get(), 1);
}

#[test]
fn aliasing_in_get() {
    // A reference from `get` must remain valid across a failed `set` —
    // i.e. `set` on a full cell takes no lasting unique borrow.
    let x = OnceCell::new();
    x.set(42).unwrap();
    let at_x = x.get().unwrap(); // --- (shared) borrow of inner `Option<T>` --+
    let _ = x.set(27); // <-- temporary (unique) borrow of inner `Option<T>` |
    println!("{}", at_x); // <------- up until here ---------------------------+
}

#[test]
#[should_panic(expected = "reentrant init")]
fn reentrant_init() {
    // Initializing the cell from within its own init closure must panic;
    // otherwise `dangling_ref` would outlive the value it points into.
    let x: OnceCell<Box<i32>> = OnceCell::new();
    let dangling_ref: Cell<Option<&i32>> = Cell::new(None);
    x.get_or_init(|| {
        let r = x.get_or_init(|| Box::new(92));
        dangling_ref.set(Some(r));
        Box::new(62)
    });
    eprintln!("use after free: {:?}", dangling_ref.get().unwrap());
}

#[test]
fn dropck() {
    // Compile-time drop-check test: the cell may outlive the `String` it
    // borrows because dropping the cell does not observe the reference.
    let cell = OnceCell::new();
    {
        let s = String::new();
        cell.set(&s).unwrap();
    }
}
--- /dev/null
+#![feature(alloc_layout_extra)]
+#![feature(array_chunks)]
+#![feature(array_from_ref)]
+#![feature(array_methods)]
+#![feature(array_map)]
+#![feature(array_windows)]
+#![feature(bool_to_option)]
+#![feature(bound_cloned)]
+#![feature(box_syntax)]
+#![feature(cell_update)]
+#![feature(const_assume)]
+#![feature(const_cell_into_inner)]
+#![feature(core_intrinsics)]
+#![feature(core_private_bignum)]
+#![feature(core_private_diy_float)]
+#![feature(debug_non_exhaustive)]
+#![feature(dec2flt)]
+#![feature(div_duration)]
+#![feature(duration_consts_2)]
+#![feature(duration_constants)]
+#![feature(duration_saturating_ops)]
+#![feature(duration_zero)]
+#![feature(exact_size_is_empty)]
+#![feature(fixed_size_array)]
+#![feature(flt2dec)]
+#![feature(fmt_internals)]
+#![feature(hashmap_internals)]
+#![feature(try_find)]
+#![feature(is_sorted)]
+#![feature(pattern)]
+#![feature(raw)]
+#![feature(sort_internals)]
+#![feature(slice_partition_at_index)]
+#![feature(min_specialization)]
+#![feature(step_trait)]
+#![feature(step_trait_ext)]
+#![feature(str_internals)]
+#![feature(test)]
+#![feature(trusted_len)]
+#![feature(try_trait)]
+#![feature(slice_internals)]
+#![feature(slice_partition_dedup)]
+#![feature(int_error_matching)]
+#![feature(array_value_iter)]
+#![feature(iter_advance_by)]
+#![feature(iter_partition_in_place)]
+#![feature(iter_is_partitioned)]
+#![feature(iter_order_by)]
+#![feature(cmp_min_max_by)]
+#![feature(iter_map_while)]
+#![feature(const_mut_refs)]
+#![feature(const_pin)]
+#![feature(const_slice_from_raw_parts)]
+#![feature(const_raw_ptr_deref)]
+#![feature(never_type)]
+#![feature(unwrap_infallible)]
+#![feature(option_unwrap_none)]
+#![feature(peekable_next_if)]
+#![feature(partition_point)]
+#![feature(once_cell)]
+#![feature(unsafe_block_in_unsafe_fn)]
+#![feature(int_bits_const)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+extern crate test;
+
+mod alloc;
+mod any;
+mod array;
+mod ascii;
+mod atomic;
+mod bool;
+mod cell;
+mod char;
+mod clone;
+mod cmp;
+mod fmt;
+mod hash;
+mod intrinsics;
+mod iter;
+mod lazy;
+mod manually_drop;
+mod mem;
+mod nonzero;
+mod num;
+mod ops;
+mod option;
+mod pattern;
+mod pin;
+mod ptr;
+mod result;
+mod slice;
+mod str;
+mod str_lossy;
+mod task;
+mod time;
+mod tuple;
--- /dev/null
+use core::mem::ManuallyDrop;
+
#[test]
fn smoke() {
    // A type whose Drop must never run: ManuallyDrop should suppress it.
    struct TypeWithDrop;
    impl Drop for TypeWithDrop {
        fn drop(&mut self) {
            unreachable!("Should not get dropped");
        }
    }

    let x = ManuallyDrop::new(TypeWithDrop);
    drop(x);

    // also test unsizing: drop suppression must hold through an
    // unsized coercion to ManuallyDrop<[T]> behind a Box.
    let x: Box<ManuallyDrop<[TypeWithDrop]>> =
        Box::new(ManuallyDrop::new([TypeWithDrop, TypeWithDrop]));
    drop(x);
}
--- /dev/null
+use core::mem::*;
+
// Sanity checks for `core::mem` layout queries and utilities.

#[test]
fn size_of_basic() {
    assert_eq!(size_of::<u8>(), 1);
    assert_eq!(size_of::<u16>(), 2);
    assert_eq!(size_of::<u32>(), 4);
    assert_eq!(size_of::<u64>(), 8);
}

// usize and raw pointers are pointer-sized; checked per target width below.

#[test]
#[cfg(target_pointer_width = "16")]
fn size_of_16() {
    assert_eq!(size_of::<usize>(), 2);
    assert_eq!(size_of::<*const usize>(), 2);
}

#[test]
#[cfg(target_pointer_width = "32")]
fn size_of_32() {
    assert_eq!(size_of::<usize>(), 4);
    assert_eq!(size_of::<*const usize>(), 4);
}

#[test]
#[cfg(target_pointer_width = "64")]
fn size_of_64() {
    assert_eq!(size_of::<usize>(), 8);
    assert_eq!(size_of::<*const usize>(), 8);
}

#[test]
fn size_of_val_basic() {
    assert_eq!(size_of_val(&1u8), 1);
    assert_eq!(size_of_val(&1u16), 2);
    assert_eq!(size_of_val(&1u32), 4);
    assert_eq!(size_of_val(&1u64), 8);
}

#[test]
fn align_of_basic() {
    assert_eq!(align_of::<u8>(), 1);
    assert_eq!(align_of::<u16>(), 2);
    assert_eq!(align_of::<u32>(), 4);
}

#[test]
#[cfg(target_pointer_width = "16")]
fn align_of_16() {
    assert_eq!(align_of::<usize>(), 2);
    assert_eq!(align_of::<*const usize>(), 2);
}

#[test]
#[cfg(target_pointer_width = "32")]
fn align_of_32() {
    assert_eq!(align_of::<usize>(), 4);
    assert_eq!(align_of::<*const usize>(), 4);
}

#[test]
#[cfg(target_pointer_width = "64")]
fn align_of_64() {
    assert_eq!(align_of::<usize>(), 8);
    assert_eq!(align_of::<*const usize>(), 8);
}

#[test]
fn align_of_val_basic() {
    assert_eq!(align_of_val(&1u8), 1);
    assert_eq!(align_of_val(&1u16), 2);
    assert_eq!(align_of_val(&1u32), 4);
}

#[test]
fn test_swap() {
    let mut x = 31337;
    let mut y = 42;
    swap(&mut x, &mut y);
    assert_eq!(x, 42);
    assert_eq!(y, 31337);
}

#[test]
fn test_replace() {
    // replace() returns the old value and installs the new one.
    let mut x = Some("test".to_string());
    let y = replace(&mut x, None);
    assert!(x.is_none());
    assert!(y.is_some());
}

#[test]
fn test_transmute_copy() {
    assert_eq!(1, unsafe { transmute_copy(&1) });
}

#[test]
fn test_transmute() {
    trait Foo {
        fn dummy(&self) {}
    }
    impl Foo for isize {}

    // Round-trip a trait-object Box through its raw (data, vtable)
    // representation and back (uses unstable `core::raw::TraitObject`).
    let a = box 100isize as Box<dyn Foo>;
    unsafe {
        let x: ::core::raw::TraitObject = transmute(a);
        assert!(*(x.data as *const isize) == 100);
        let _x: Box<dyn Foo> = transmute(x);
    }

    // String and Vec<u8> share a layout; "L" is byte 76.
    unsafe {
        assert_eq!(transmute::<_, Vec<u8>>("L".to_string()), [76]);
    }
}

#[test]
#[allow(dead_code)]
fn test_discriminant_send_sync() {
    enum Regular {
        A,
        B(i32),
    }
    enum NotSendSync {
        A(*const i32),
    }

    fn is_send_sync<T: Send + Sync>() {}

    // Discriminant<T> is Send + Sync even when T itself is not.
    is_send_sync::<Discriminant<Regular>>();
    is_send_sync::<Discriminant<NotSendSync>>();
}
--- /dev/null
+use core::convert::TryFrom;
+use core::num::{IntErrorKind, NonZeroI32, NonZeroI8, NonZeroU32, NonZeroU8};
+use core::option::Option::{self, None, Some};
+use std::mem::size_of;
+
// Tests for the NonZero* integer types: niche layout, matching, conversions.

#[test]
fn test_create_nonzero_instance() {
    let _a = unsafe { NonZeroU32::new_unchecked(21) };
}

#[test]
fn test_size_nonzero_in_option() {
    // The zero niche lets Option<NonZero*> be the same size as NonZero*.
    assert_eq!(size_of::<NonZeroU32>(), size_of::<Option<NonZeroU32>>());
    assert_eq!(size_of::<NonZeroI32>(), size_of::<Option<NonZeroI32>>());
}

#[test]
fn test_match_on_nonzero_option() {
    let a = Some(unsafe { NonZeroU32::new_unchecked(42) });
    match a {
        Some(val) => assert_eq!(val.get(), 42),
        None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
    }

    match unsafe { Some(NonZeroU32::new_unchecked(43)) } {
        Some(val) => assert_eq!(val.get(), 43),
        None => panic!("unexpected None while matching on Some(NonZeroU32(_))"),
    }
}

// The following matches check that the niche optimization does not confuse
// "empty" payloads (empty Vec/String, Rc/Arc) with None.

#[test]
fn test_match_option_empty_vec() {
    let a: Option<Vec<isize>> = Some(vec![]);
    match a {
        None => panic!("unexpected None while matching on Some(vec![])"),
        _ => {}
    }
}

#[test]
fn test_match_option_vec() {
    let a = Some(vec![1, 2, 3, 4]);
    match a {
        Some(v) => assert_eq!(v, [1, 2, 3, 4]),
        None => panic!("unexpected None while matching on Some(vec![1, 2, 3, 4])"),
    }
}

#[test]
fn test_match_option_rc() {
    use std::rc::Rc;

    let five = Rc::new(5);
    match Some(five) {
        Some(r) => assert_eq!(*r, 5),
        None => panic!("unexpected None while matching on Some(Rc::new(5))"),
    }
}

#[test]
fn test_match_option_arc() {
    use std::sync::Arc;

    let five = Arc::new(5);
    match Some(five) {
        Some(a) => assert_eq!(*a, 5),
        None => panic!("unexpected None while matching on Some(Arc::new(5))"),
    }
}

#[test]
fn test_match_option_empty_string() {
    let a = Some(String::new());
    match a {
        None => panic!("unexpected None while matching on Some(String::new())"),
        _ => {}
    }
}

#[test]
fn test_match_option_string() {
    let five = "Five".to_string();
    match Some(five) {
        Some(s) => assert_eq!(s, "Five"),
        None => panic!("unexpected None while matching on Some(String { ... })"),
    }
}

mod atom {
    use core::num::NonZeroU32;

    #[derive(PartialEq, Eq)]
    pub struct Atom {
        index: NonZeroU32, // private
    }
    pub const FOO_ATOM: Atom = Atom { index: unsafe { NonZeroU32::new_unchecked(7) } };
}

macro_rules! atom {
    ("foo") => {
        atom::FOO_ATOM
    };
}

#[test]
fn test_match_nonzero_const_pattern() {
    match atom!("foo") {
        // Using as a pattern is supported by the compiler:
        atom!("foo") => {}
        _ => panic!("Expected the const item as a pattern to match."),
    }
}

#[test]
fn test_from_nonzero() {
    let nz = NonZeroU32::new(1).unwrap();
    let num: u32 = nz.into();
    assert_eq!(num, 1u32);
}

#[test]
fn test_from_signed_nonzero() {
    let nz = NonZeroI32::new(1).unwrap();
    let num: i32 = nz.into();
    assert_eq!(num, 1i32);
}

#[test]
fn test_from_str() {
    // Parsing zero must yield the dedicated Zero error kind; other failures
    // keep the usual integer error kinds.
    assert_eq!("123".parse::<NonZeroU8>(), Ok(NonZeroU8::new(123).unwrap()));
    assert_eq!("0".parse::<NonZeroU8>().err().map(|e| e.kind().clone()), Some(IntErrorKind::Zero));
    assert_eq!(
        "-1".parse::<NonZeroU8>().err().map(|e| e.kind().clone()),
        Some(IntErrorKind::InvalidDigit)
    );
    assert_eq!(
        "-129".parse::<NonZeroI8>().err().map(|e| e.kind().clone()),
        Some(IntErrorKind::NegOverflow)
    );
    assert_eq!(
        "257".parse::<NonZeroU8>().err().map(|e| e.kind().clone()),
        Some(IntErrorKind::PosOverflow)
    );
}

#[test]
fn test_nonzero_bitor() {
    // OR-ing with any value (even a zero primitive) keeps the result nonzero,
    // so all four operand combinations are provided.
    let nz_alt = NonZeroU8::new(0b1010_1010).unwrap();
    let nz_low = NonZeroU8::new(0b0000_1111).unwrap();

    let both_nz: NonZeroU8 = nz_alt | nz_low;
    assert_eq!(both_nz.get(), 0b1010_1111);

    let rhs_int: NonZeroU8 = nz_low | 0b1100_0000u8;
    assert_eq!(rhs_int.get(), 0b1100_1111);

    let rhs_zero: NonZeroU8 = nz_alt | 0u8;
    assert_eq!(rhs_zero.get(), 0b1010_1010);

    let lhs_int: NonZeroU8 = 0b0110_0110u8 | nz_alt;
    assert_eq!(lhs_int.get(), 0b1110_1110);

    let lhs_zero: NonZeroU8 = 0u8 | nz_low;
    assert_eq!(lhs_zero.get(), 0b0000_1111);
}

#[test]
fn test_nonzero_bitor_assign() {
    let mut target = NonZeroU8::new(0b1010_1010).unwrap();

    target |= NonZeroU8::new(0b0000_1111).unwrap();
    assert_eq!(target.get(), 0b1010_1111);

    target |= 0b0001_0000;
    assert_eq!(target.get(), 0b1011_1111);

    target |= 0;
    assert_eq!(target.get(), 0b1011_1111);
}

#[test]
fn test_nonzero_from_int_on_success() {
    assert_eq!(NonZeroU8::try_from(5), Ok(NonZeroU8::new(5).unwrap()));
    assert_eq!(NonZeroU32::try_from(5), Ok(NonZeroU32::new(5).unwrap()));

    assert_eq!(NonZeroI8::try_from(-5), Ok(NonZeroI8::new(-5).unwrap()));
    assert_eq!(NonZeroI32::try_from(-5), Ok(NonZeroI32::new(-5).unwrap()));
}

#[test]
fn test_nonzero_from_int_on_err() {
    assert!(NonZeroU8::try_from(0).is_err());
    assert!(NonZeroU32::try_from(0).is_err());

    assert!(NonZeroI8::try_from(0).is_err());
    assert!(NonZeroI32::try_from(0).is_err());
}

#[test]
fn nonzero_const() {
    // test that the methods of `NonZero*` are usable in a const context
    // Note: only tests NonZeroU8
    const NONZERO: NonZeroU8 = unsafe { NonZeroU8::new_unchecked(5) };

    const GET: u8 = NONZERO.get();
    assert_eq!(GET, 5);

    const ZERO: Option<NonZeroU8> = NonZeroU8::new(0);
    assert!(ZERO.is_none());

    const ONE: Option<NonZeroU8> = NonZeroU8::new(1);
    assert!(ONE.is_some());
}
--- /dev/null
+use core::num::bignum::tests::Big8x3 as Big;
+
// Tests for `Big8x3`: a fixed-capacity bignum of three base-256 digits,
// so representable values are 0..=0xffffff; exceeding that must panic.

#[test]
#[should_panic]
fn test_from_u64_overflow() {
    // 0x1000000 needs a fourth digit — out of capacity.
    Big::from_u64(0x1000000);
}

#[test]
fn test_add() {
    // Includes a carry across digit boundaries (0xfffe + 3).
    assert_eq!(*Big::from_small(3).add(&Big::from_small(4)), Big::from_small(7));
    assert_eq!(*Big::from_small(3).add(&Big::from_small(0)), Big::from_small(3));
    assert_eq!(*Big::from_small(0).add(&Big::from_small(3)), Big::from_small(3));
    assert_eq!(*Big::from_small(3).add(&Big::from_u64(0xfffe)), Big::from_u64(0x10001));
    assert_eq!(*Big::from_u64(0xfedc).add(&Big::from_u64(0x789)), Big::from_u64(0x10665));
    assert_eq!(*Big::from_u64(0x789).add(&Big::from_u64(0xfedc)), Big::from_u64(0x10665));
}

#[test]
#[should_panic]
fn test_add_overflow_1() {
    Big::from_small(1).add(&Big::from_u64(0xffffff));
}

#[test]
#[should_panic]
fn test_add_overflow_2() {
    Big::from_u64(0xffffff).add(&Big::from_small(1));
}

#[test]
fn test_add_small() {
    assert_eq!(*Big::from_small(3).add_small(4), Big::from_small(7));
    assert_eq!(*Big::from_small(3).add_small(0), Big::from_small(3));
    assert_eq!(*Big::from_small(0).add_small(3), Big::from_small(3));
    assert_eq!(*Big::from_small(7).add_small(250), Big::from_u64(257));
    assert_eq!(*Big::from_u64(0x7fff).add_small(1), Big::from_u64(0x8000));
    assert_eq!(*Big::from_u64(0x2ffe).add_small(0x35), Big::from_u64(0x3033));
    assert_eq!(*Big::from_small(0xdc).add_small(0x89), Big::from_u64(0x165));
}

#[test]
#[should_panic]
fn test_add_small_overflow() {
    Big::from_u64(0xffffff).add_small(1);
}

#[test]
fn test_sub() {
    // Includes borrows across digit boundaries.
    assert_eq!(*Big::from_small(7).sub(&Big::from_small(4)), Big::from_small(3));
    assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x789)), Big::from_u64(0xfedc));
    assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0xfedc)), Big::from_u64(0x789));
    assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x10664)), Big::from_small(1));
    assert_eq!(*Big::from_u64(0x10665).sub(&Big::from_u64(0x10665)), Big::from_small(0));
}

// Subtraction is unsigned: any negative result must panic.

#[test]
#[should_panic]
fn test_sub_underflow_1() {
    Big::from_u64(0x10665).sub(&Big::from_u64(0x10666));
}

#[test]
#[should_panic]
fn test_sub_underflow_2() {
    Big::from_small(0).sub(&Big::from_u64(0x123456));
}

#[test]
fn test_mul_small() {
    assert_eq!(*Big::from_small(7).mul_small(5), Big::from_small(35));
    assert_eq!(*Big::from_small(0xff).mul_small(0xff), Big::from_u64(0xfe01));
    assert_eq!(*Big::from_u64(0xffffff / 13).mul_small(13), Big::from_u64(0xffffff));
}

#[test]
#[should_panic]
fn test_mul_small_overflow() {
    Big::from_u64(0x800000).mul_small(2);
}

#[test]
fn test_mul_pow2() {
    // Left shift by bit counts; 23 is the largest in-capacity shift of 1.
    assert_eq!(*Big::from_small(0x7).mul_pow2(4), Big::from_small(0x70));
    assert_eq!(*Big::from_small(0xff).mul_pow2(1), Big::from_u64(0x1fe));
    assert_eq!(*Big::from_small(0xff).mul_pow2(12), Big::from_u64(0xff000));
    assert_eq!(*Big::from_small(0x1).mul_pow2(23), Big::from_u64(0x800000));
    assert_eq!(*Big::from_u64(0x123).mul_pow2(0), Big::from_u64(0x123));
    assert_eq!(*Big::from_u64(0x123).mul_pow2(7), Big::from_u64(0x9180));
    assert_eq!(*Big::from_u64(0x123).mul_pow2(15), Big::from_u64(0x918000));
    assert_eq!(*Big::from_small(0).mul_pow2(23), Big::from_small(0));
}

#[test]
#[should_panic]
fn test_mul_pow2_overflow_1() {
    Big::from_u64(0x1).mul_pow2(24);
}

#[test]
#[should_panic]
fn test_mul_pow2_overflow_2() {
    Big::from_u64(0x123).mul_pow2(16);
}

#[test]
fn test_mul_pow5() {
    // Multiplication by powers of five; 5^127 * 0 stays zero without overflow.
    assert_eq!(*Big::from_small(42).mul_pow5(0), Big::from_small(42));
    assert_eq!(*Big::from_small(1).mul_pow5(2), Big::from_small(25));
    assert_eq!(*Big::from_small(1).mul_pow5(4), Big::from_u64(25 * 25));
    assert_eq!(*Big::from_small(4).mul_pow5(3), Big::from_u64(500));
    assert_eq!(*Big::from_small(140).mul_pow5(2), Big::from_u64(25 * 140));
    assert_eq!(*Big::from_small(25).mul_pow5(1), Big::from_small(125));
    assert_eq!(*Big::from_small(125).mul_pow5(7), Big::from_u64(9765625));
    assert_eq!(*Big::from_small(0).mul_pow5(127), Big::from_small(0));
}

#[test]
#[should_panic]
fn test_mul_pow5_overflow_1() {
    Big::from_small(1).mul_pow5(12);
}

#[test]
#[should_panic]
fn test_mul_pow5_overflow_2() {
    Big::from_small(230).mul_pow5(8);
}

#[test]
fn test_mul_digits() {
    // Multiplication by a little-endian digit slice.
    assert_eq!(*Big::from_small(3).mul_digits(&[5]), Big::from_small(15));
    assert_eq!(*Big::from_small(0xff).mul_digits(&[0xff]), Big::from_u64(0xfe01));
    assert_eq!(*Big::from_u64(0x123).mul_digits(&[0x56, 0x4]), Big::from_u64(0x4edc2));
    assert_eq!(*Big::from_u64(0x12345).mul_digits(&[0x67]), Big::from_u64(0x7530c3));
    assert_eq!(*Big::from_small(0x12).mul_digits(&[0x67, 0x45, 0x3]), Big::from_u64(0x3ae13e));
    assert_eq!(*Big::from_u64(0xffffff / 13).mul_digits(&[13]), Big::from_u64(0xffffff));
    assert_eq!(*Big::from_small(13).mul_digits(&[0x3b, 0xb1, 0x13]), Big::from_u64(0xffffff));
}

#[test]
#[should_panic]
fn test_mul_digits_overflow_1() {
    Big::from_u64(0x800000).mul_digits(&[2]);
}

#[test]
#[should_panic]
fn test_mul_digits_overflow_2() {
    Big::from_u64(0x1000).mul_digits(&[0, 0x10]);
}

#[test]
fn test_div_rem_small() {
    // div_rem_small mutates self into the quotient and returns the remainder.
    let as_val = |(q, r): (&mut Big, u8)| (q.clone(), r);
    assert_eq!(as_val(Big::from_small(0xff).div_rem_small(15)), (Big::from_small(17), 0));
    assert_eq!(as_val(Big::from_small(0xff).div_rem_small(16)), (Big::from_small(15), 15));
    assert_eq!(as_val(Big::from_small(3).div_rem_small(40)), (Big::from_small(0), 3));
    assert_eq!(
        as_val(Big::from_u64(0xffffff).div_rem_small(123)),
        (Big::from_u64(0xffffff / 123), (0xffffffu64 % 123) as u8)
    );
    assert_eq!(
        as_val(Big::from_u64(0x10000).div_rem_small(123)),
        (Big::from_u64(0x10000 / 123), (0x10000u64 % 123) as u8)
    );
}

#[test]
fn test_div_rem() {
    // Full bignum division; q and r are pre-filled with garbage (42) to
    // ensure div_rem fully overwrites its outputs.
    fn div_rem(n: u64, d: u64) -> (Big, Big) {
        let mut q = Big::from_small(42);
        let mut r = Big::from_small(42);
        Big::from_u64(n).div_rem(&Big::from_u64(d), &mut q, &mut r);
        (q, r)
    }
    assert_eq!(div_rem(1, 1), (Big::from_small(1), Big::from_small(0)));
    assert_eq!(div_rem(4, 3), (Big::from_small(1), Big::from_small(1)));
    assert_eq!(div_rem(1, 7), (Big::from_small(0), Big::from_small(1)));
    assert_eq!(div_rem(45, 9), (Big::from_small(5), Big::from_small(0)));
    assert_eq!(div_rem(103, 9), (Big::from_small(11), Big::from_small(4)));
    assert_eq!(div_rem(123456, 77), (Big::from_u64(1603), Big::from_small(25)));
    assert_eq!(div_rem(0xffff, 1), (Big::from_u64(0xffff), Big::from_small(0)));
    assert_eq!(div_rem(0xeeee, 0xffff), (Big::from_small(0), Big::from_u64(0xeeee)));
    assert_eq!(div_rem(2_000_000, 2), (Big::from_u64(1_000_000), Big::from_u64(0)));
}

#[test]
fn test_is_zero() {
    assert!(Big::from_small(0).is_zero());
    assert!(!Big::from_small(3).is_zero());
    assert!(!Big::from_u64(0x123).is_zero());
    assert!(!Big::from_u64(0xffffff).sub(&Big::from_u64(0xfffffe)).is_zero());
    assert!(Big::from_u64(0xffffff).sub(&Big::from_u64(0xffffff)).is_zero());
}

#[test]
fn test_get_bit() {
    let x = Big::from_small(0b1101);
    assert_eq!(x.get_bit(0), 1);
    assert_eq!(x.get_bit(1), 0);
    assert_eq!(x.get_bit(2), 1);
    assert_eq!(x.get_bit(3), 1);
    // Bit indices cross the 8-bit digit boundary transparently.
    let y = Big::from_u64(1 << 15);
    assert_eq!(y.get_bit(14), 0);
    assert_eq!(y.get_bit(15), 1);
    assert_eq!(y.get_bit(16), 0);
}

#[test]
#[should_panic]
fn test_get_bit_out_of_range() {
    // Bit 24 is past the 24-bit capacity.
    Big::from_small(42).get_bit(24);
}

#[test]
fn test_bit_length() {
    // bit_length of zero is defined as 0.
    assert_eq!(Big::from_small(0).bit_length(), 0);
    assert_eq!(Big::from_small(1).bit_length(), 1);
    assert_eq!(Big::from_small(5).bit_length(), 3);
    assert_eq!(Big::from_small(0x18).bit_length(), 5);
    assert_eq!(Big::from_u64(0x4073).bit_length(), 15);
    assert_eq!(Big::from_u64(0xffffff).bit_length(), 24);
}

#[test]
fn test_ord() {
    assert!(Big::from_u64(0) < Big::from_u64(0xffffff));
    // Magnitude comparison, not lexicographic digit comparison.
    assert!(Big::from_u64(0x102) < Big::from_u64(0x201));
}

#[test]
fn test_fmt() {
    // Debug output groups hex digits per base-256 digit, separated by '_'.
    assert_eq!(format!("{:?}", Big::from_u64(0)), "0x0");
    assert_eq!(format!("{:?}", Big::from_u64(0x1)), "0x1");
    assert_eq!(format!("{:?}", Big::from_u64(0x12)), "0x12");
    assert_eq!(format!("{:?}", Big::from_u64(0x123)), "0x1_23");
    assert_eq!(format!("{:?}", Big::from_u64(0x1234)), "0x12_34");
    assert_eq!(format!("{:?}", Big::from_u64(0x12345)), "0x1_23_45");
    assert_eq!(format!("{:?}", Big::from_u64(0x123456)), "0x12_34_56");
}
--- /dev/null
+#![allow(overflowing_literals)]
+
+mod parse;
+mod rawfp;
+
// Take a float literal, turn it into a string in various ways (that are all trusted
// to be correct) and see if those strings are parsed back to the value of the literal.
// Requires a *polymorphic literal*, i.e., one that can serve as f64 as well as f32.
macro_rules! test_literal {
    ($x: expr) => {{
        let x32: f32 = $x;
        let x64: f64 = $x;
        // Three textual forms: the literal's own spelling, Debug, and
        // scientific notation — all must round-trip, with both signs.
        let inputs = &[stringify!($x).into(), format!("{:?}", x64), format!("{:e}", x64)];
        for input in inputs {
            assert_eq!(input.parse(), Ok(x64));
            assert_eq!(input.parse(), Ok(x32));
            let neg_input = &format!("-{}", input);
            assert_eq!(neg_input.parse(), Ok(-x64));
            assert_eq!(neg_input.parse(), Ok(-x32));
        }
    }};
}

#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn ordinary() {
    test_literal!(1.0);
    test_literal!(3e-5);
    test_literal!(0.1);
    test_literal!(12345.);
    test_literal!(0.9999999);

    if cfg!(miri) {
        // Miri is too slow
        return;
    }

    test_literal!(2.2250738585072014e-308);
}

#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn special_code_paths() {
    test_literal!(36893488147419103229.0); // 2^65 - 3, triggers half-to-even with even significand
    test_literal!(101e-33); // Triggers the tricky underflow case in AlgorithmM (for f32)
    test_literal!(1e23); // Triggers AlgorithmR
    test_literal!(2075e23); // Triggers another path through AlgorithmR
    test_literal!(8713e-23); // ... and yet another.
}

#[test]
fn large() {
    test_literal!(1e300);
    test_literal!(123456789.34567e250);
    test_literal!(943794359898089732078308743689303290943794359843568973207830874368930329.);
}

#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn subnormals() {
    // Values below f64::MIN_POSITIVE, down to the smallest subnormal.
    test_literal!(5e-324);
    test_literal!(91e-324);
    test_literal!(1e-322);
    test_literal!(13245643e-320);
    test_literal!(2.22507385851e-308);
    test_literal!(2.1e-308);
    test_literal!(4.9406564584124654e-324);
}

#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn infinity() {
    // Anything past f64::MAX must overflow to infinity, not error.
    test_literal!(1e400);
    test_literal!(1e309);
    test_literal!(2e308);
    test_literal!(1.7976931348624e308);
}

#[test]
fn zero() {
    // Anything below the smallest subnormal must underflow to zero.
    test_literal!(0.0);
    test_literal!(1e-325);

    if cfg!(miri) {
        // Miri is too slow
        return;
    }

    test_literal!(1e-326);
    test_literal!(1e-500);
}

#[test]
fn fast_path_correct() {
    // This number triggers the fast path and is handled incorrectly when compiling on
    // x86 without SSE2 (i.e., using the x87 FPU stack).
    test_literal!(1.448997445238699);
}

// The remaining tests cover rejection of malformed inputs and the special
// textual values NaN and inf.

#[test]
fn lonely_dot() {
    assert!(".".parse::<f32>().is_err());
    assert!(".".parse::<f64>().is_err());
}

#[test]
fn exponentiated_dot() {
    assert!(".e0".parse::<f32>().is_err());
    assert!(".e0".parse::<f64>().is_err());
}

#[test]
fn lonely_sign() {
    assert!("+".parse::<f32>().is_err());
    assert!("-".parse::<f64>().is_err());
}

#[test]
fn whitespace() {
    // Leading/trailing whitespace is not trimmed by parse.
    assert!(" 1.0".parse::<f32>().is_err());
    assert!("1.0 ".parse::<f64>().is_err());
}

#[test]
fn nan() {
    assert!("NaN".parse::<f32>().unwrap().is_nan());
    assert!("NaN".parse::<f64>().unwrap().is_nan());
}

#[test]
fn inf() {
    assert_eq!("inf".parse(), Ok(f64::INFINITY));
    assert_eq!("-inf".parse(), Ok(f64::NEG_INFINITY));
    assert_eq!("inf".parse(), Ok(f32::INFINITY));
    assert_eq!("-inf".parse(), Ok(f32::NEG_INFINITY));
}

#[test]
fn massive_exponent() {
    // Exponents far outside i64 range must saturate to inf / zero, not panic.
    let max = i64::MAX;
    assert_eq!(format!("1e{}000", max).parse(), Ok(f64::INFINITY));
    assert_eq!(format!("1e-{}000", max).parse(), Ok(0.0));
    assert_eq!(format!("1e{}000", max).parse(), Ok(f64::INFINITY));
}

#[test]
fn borderline_overflow() {
    let mut s = "0.".to_string();
    for _ in 0..375 {
        s.push('3');
    }
    // At the time of this writing, this returns Err(..), but this is a bug that should be fixed.
    // It makes no sense to enshrine that in a test, the important part is that it doesn't panic.
    let _ = s.parse::<f64>();
}
--- /dev/null
+use core::num::dec2flt::parse::ParseResult::{Invalid, Valid};
+use core::num::dec2flt::parse::{parse_decimal, Decimal};
+
// Tests for the internal decimal tokenizer used by float parsing.

#[test]
fn missing_pieces() {
    // An exponent marker with no digits (either side) is always invalid.
    let permutations = &[".e", "1e", "e4", "e", ".12e", "321.e", "32.12e+", "12.32e-"];
    for &s in permutations {
        assert_eq!(parse_decimal(s), Invalid);
    }
}

#[test]
fn invalid_chars() {
    // Injecting any junk character at any position of a valid literal
    // must make the whole input invalid.
    let invalid = "r,?<j";
    let valid_strings = &["123", "666.", ".1", "5e1", "7e-3", "0.0e+1"];
    for c in invalid.chars() {
        for s in valid_strings {
            for i in 0..s.len() {
                let mut input = String::new();
                input.push_str(s);
                input.insert(i, c);
                assert!(parse_decimal(&input) == Invalid, "did not reject invalid {:?}", input);
            }
        }
    }
}

#[test]
fn valid() {
    // Decimal::new(integral, fractional, exp) — digits are kept as raw
    // byte slices; a '+' sign and all-zero exponents normalize away.
    assert_eq!(parse_decimal("123.456e789"), Valid(Decimal::new(b"123", b"456", 789)));
    assert_eq!(parse_decimal("123.456e+789"), Valid(Decimal::new(b"123", b"456", 789)));
    assert_eq!(parse_decimal("123.456e-789"), Valid(Decimal::new(b"123", b"456", -789)));
    assert_eq!(parse_decimal(".050"), Valid(Decimal::new(b"", b"050", 0)));
    assert_eq!(parse_decimal("999"), Valid(Decimal::new(b"999", b"", 0)));
    assert_eq!(parse_decimal("1.e300"), Valid(Decimal::new(b"1", b"", 300)));
    assert_eq!(parse_decimal(".1e300"), Valid(Decimal::new(b"", b"1", 300)));
    assert_eq!(parse_decimal("101e-33"), Valid(Decimal::new(b"101", b"", -33)));
    let zeros = "0".repeat(25);
    let s = format!("1.5e{}", zeros);
    assert_eq!(parse_decimal(&s), Valid(Decimal::new(b"1", b"5", 0)));
}
--- /dev/null
+use core::num::dec2flt::rawfp::RawFloat;
+use core::num::dec2flt::rawfp::{fp_to_float, next_float, prev_float, round_normal};
+use core::num::diy_float::Fp;
+
+fn integer_decode(f: f64) -> (u64, i16, i8) {
+ RawFloat::integer_decode(f)
+}
+
#[test]
fn fp_to_float_half_to_even() {
    // Checks that `fp_to_float` rounds half-to-even when dropping the 11
    // excess bits of a 64-bit significand down to f64's 53 bits.
    fn is_normalized(sig: u64) -> bool {
        // intentionally written without {min,max}_sig() as a sanity check
        sig >> 52 == 1 && sig >> 53 == 0
    }

    fn conv(sig: u64) -> u64 {
        // The significands are perfectly in range, so the exponent should not matter
        let (m1, e1, _) = integer_decode(fp_to_float::<f64>(Fp { f: sig, e: 0 }));
        assert_eq!(e1, 0 + 64 - 53);
        let (m2, e2, _) = integer_decode(fp_to_float::<f64>(Fp { f: sig, e: 55 }));
        assert_eq!(e2, 55 + 64 - 53);
        assert_eq!(m2, m1);
        let (m3, e3, _) = integer_decode(fp_to_float::<f64>(Fp { f: sig, e: -78 }));
        assert_eq!(e3, -78 + 64 - 53);
        assert_eq!(m3, m2);
        m3
    }

    let odd = 0x1F_EDCB_A012_345F;
    let even = odd - 1;
    assert!(is_normalized(odd));
    assert!(is_normalized(even));
    // Dropped bits all zero: exactly representable, no rounding.
    assert_eq!(conv(odd << 11), odd);
    assert_eq!(conv(even << 11), even);
    // Exact tie (only the topmost dropped bit, bit 10, set): round to even —
    // odd goes up to odd + 1, even stays.
    assert_eq!(conv(odd << 11 | 1 << 10), odd + 1);
    assert_eq!(conv(even << 11 | 1 << 10), even);
    // Just above the tie: round up regardless of parity.
    assert_eq!(conv(even << 11 | 1 << 10 | 1), even + 1);
    // Below the halfway point (bit 10 clear): round down.
    assert_eq!(conv(odd << 11 | 1 << 9), odd);
    assert_eq!(conv(even << 11 | 1 << 9), even);
    // All 11 dropped bits set: clearly above half, round up.
    assert_eq!(conv(odd << 11 | 0x7FF), odd + 1);
    assert_eq!(conv(even << 11 | 0x7FF), even + 1);
    // Bits 0..=9 set but bit 10 clear: still below half, round down.
    assert_eq!(conv(odd << 11 | 0x3FF), odd);
    assert_eq!(conv(even << 11 | 0x3FF), even);
}
+
#[test]
fn integers_to_f64() {
    // Integer payloads `f * 2^e` within the exactly-representable range must
    // convert without any rounding.
    assert_eq!(fp_to_float::<f64>(Fp { f: 1, e: 0 }), 1.0);
    assert_eq!(fp_to_float::<f64>(Fp { f: 42, e: 7 }), (42 << 7) as f64);
    assert_eq!(fp_to_float::<f64>(Fp { f: 1 << 20, e: 30 }), (1u64 << 50) as f64);
    // Negative exponents scale down: 4 * 2^-3 == 0.5.
    assert_eq!(fp_to_float::<f64>(Fp { f: 4, e: -3 }), 0.5);
}
+
// A handful of "human-looking" finite f64 values spanning several orders of
// magnitude, shared by the round-trip and next/prev identity tests below.
const SOME_FLOATS: [f64; 9] = [
    0.1f64,
    33.568,
    42.1e-5,
    777.0e9,
    1.1111,
    0.347997,
    9843579834.35892,
    12456.0e-150,
    54389573.0e-150,
];
+
#[test]
fn human_f64_roundtrip() {
    // Decoding a float into (mantissa, exponent) and reassembling it via
    // `fp_to_float` must reproduce the original value bit-for-bit.
    for &x in &SOME_FLOATS {
        let (f, e, _) = integer_decode(x);
        // Field-init shorthand (was `Fp { f: f, e: e }`, flagged by clippy's
        // `redundant_field_names`).
        let fp = Fp { f, e };
        assert_eq!(fp_to_float::<f64>(fp), x);
    }
}
+
#[test]
fn rounding_overflow() {
    // A significand whose top 53 bits are all ones rounds up and carries out
    // of the significand entirely; the result must be renormalized.
    let x = Fp { f: 0xFF_FF_FF_FF_FF_FF_FF_00u64, e: 42 };
    let rounded = round_normal::<f64>(x);
    let adjusted_k = x.e + 64 - 53;
    // The carry leaves the minimal 53-bit significand (2^52) ...
    assert_eq!(rounded.sig, 1 << 52);
    // ... and bumps the exponent by one to compensate.
    assert_eq!(rounded.k, adjusted_k + 1);
}
+
#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn prev_float_monotonic() {
    // Walking downward from 1.0 must be strictly decreasing in tiny steps.
    let mut current = 1.0;
    for _ in 0..100 {
        let below = prev_float(current);
        assert!(below < current);
        assert!(current - below < 1e-15);
        current = below;
    }
}
+
// The smallest positive f64 value (2^-1074), i.e. the minimum subnormal.
const MIN_SUBNORMAL: f64 = 5e-324;
+
#[test]
fn next_float_zero() {
    // The successor of +0.0 is the smallest positive subnormal.
    let successor = next_float(0.0);
    assert!(successor != 0.0);
    assert_eq!(successor, MIN_SUBNORMAL);
}
+
#[test]
fn next_float_subnormal() {
    // In the subnormal range the spacing between neighbors is exactly
    // MIN_SUBNORMAL, so stepping up from it doubles the value.
    let succ = next_float(MIN_SUBNORMAL);
    assert!(succ > 0.0);
    assert!(succ != MIN_SUBNORMAL);
    assert_eq!(succ - MIN_SUBNORMAL, MIN_SUBNORMAL);
}
+
#[test]
fn next_float_inf() {
    // Stepping up from the largest finite value yields infinity, and
    // infinity is a fixed point of `next_float`.
    assert_eq!(next_float(f64::MAX), f64::INFINITY);
    assert_eq!(next_float(f64::INFINITY), f64::INFINITY);
}
+
#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn next_prev_identity() {
    // For these finite values, `prev_float` and `next_float` are exact
    // inverses of each other, including when composed twice.
    for &x in &SOME_FLOATS {
        assert_eq!(prev_float(next_float(x)), x);
        assert_eq!(prev_float(prev_float(next_float(next_float(x)))), x);
        assert_eq!(next_float(prev_float(x)), x);
        assert_eq!(next_float(next_float(prev_float(prev_float(x)))), x);
    }
}
+
#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn next_float_monotonic() {
    // Walk upward across the 0.5 boundary; each step must be strictly
    // increasing and tiny.
    let mut current = 0.49999999999999;
    assert!(current < 0.5);
    for _ in 0..200 {
        let above = next_float(current);
        assert!(above > current);
        assert!(above - current < 1e-15, "next_float_monotonic: delta = {:?}", above - current);
        current = above;
    }
    assert!(current > 0.5);
}
+
#[test]
fn test_f32_integer_decode() {
    // Expected triples are (mantissa, exponent, sign) with
    // value == sign * mantissa * 2^exponent.
    assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1));
    assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1));
    assert_eq!(2f32.powf(100.0).integer_decode(), (8388608, 77, 1));
    // Zeroes decode with a zero mantissa but keep their sign.
    assert_eq!(0f32.integer_decode(), (0, -150, 1));
    assert_eq!((-0f32).integer_decode(), (0, -150, -1));
    assert_eq!(f32::INFINITY.integer_decode(), (8388608, 105, 1));
    assert_eq!(f32::NEG_INFINITY.integer_decode(), (8388608, 105, -1));

    // Ignore the "sign" (quiet / signalling flag) of NAN.
    // It can vary between runtime operations and LLVM folding.
    let (nan_m, nan_e, _nan_s) = f32::NAN.integer_decode();
    assert_eq!((nan_m, nan_e), (12582912, 105));
}
+
#[test]
fn test_f64_integer_decode() {
    // Expected triples are (mantissa, exponent, sign) with
    // value == sign * mantissa * 2^exponent.
    assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1));
    assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1));
    assert_eq!(2f64.powf(100.0).integer_decode(), (4503599627370496, 48, 1));
    // Zeroes decode with a zero mantissa but keep their sign.
    assert_eq!(0f64.integer_decode(), (0, -1075, 1));
    assert_eq!((-0f64).integer_decode(), (0, -1075, -1));
    assert_eq!(f64::INFINITY.integer_decode(), (4503599627370496, 972, 1));
    assert_eq!(f64::NEG_INFINITY.integer_decode(), (4503599627370496, 972, -1));

    // Ignore the "sign" (quiet / signalling flag) of NAN.
    // It can vary between runtime operations and LLVM folding.
    let (nan_m, nan_e, _nan_s) = f64::NAN.integer_decode();
    assert_eq!((nan_m, nan_e), (6755399441055744, 972));
}
--- /dev/null
+use core::num::flt2dec::estimator::*;
+
#[test]
fn test_estimate_scaling_factor() {
    // The estimator may return a value one less than the true scaling factor,
    // so "almost equal" accepts either an exact match or expected == actual + 1.
    macro_rules! assert_almost_eq {
        ($actual:expr, $expected:expr) => {{
            let actual = $actual;
            let expected = $expected;
            println!(
                "{} - {} = {} - {} = {}",
                stringify!($expected),
                stringify!($actual),
                expected,
                actual,
                expected - actual
            );
            assert!(
                expected == actual || expected == actual + 1,
                "expected {}, actual {}",
                expected,
                actual
            );
        }};
    }

    // Small mantissas with exponent 0, straddling powers of ten.
    assert_almost_eq!(estimate_scaling_factor(1, 0), 0);
    assert_almost_eq!(estimate_scaling_factor(2, 0), 1);
    assert_almost_eq!(estimate_scaling_factor(10, 0), 1);
    assert_almost_eq!(estimate_scaling_factor(11, 0), 2);
    assert_almost_eq!(estimate_scaling_factor(100, 0), 2);
    assert_almost_eq!(estimate_scaling_factor(101, 0), 3);
    assert_almost_eq!(estimate_scaling_factor(10000000000000000000, 0), 19);
    assert_almost_eq!(estimate_scaling_factor(10000000000000000001, 0), 20);

    // 1/2^20 = 0.00000095367...
    assert_almost_eq!(estimate_scaling_factor(1 * 1048576 / 1000000, -20), -6);
    assert_almost_eq!(estimate_scaling_factor(1 * 1048576 / 1000000 + 1, -20), -5);
    assert_almost_eq!(estimate_scaling_factor(10 * 1048576 / 1000000, -20), -5);
    assert_almost_eq!(estimate_scaling_factor(10 * 1048576 / 1000000 + 1, -20), -4);
    assert_almost_eq!(estimate_scaling_factor(100 * 1048576 / 1000000, -20), -4);
    assert_almost_eq!(estimate_scaling_factor(100 * 1048576 / 1000000 + 1, -20), -3);
    assert_almost_eq!(estimate_scaling_factor(1048575, -20), 0);
    assert_almost_eq!(estimate_scaling_factor(1048576, -20), 0);
    assert_almost_eq!(estimate_scaling_factor(1048577, -20), 1);
    assert_almost_eq!(estimate_scaling_factor(10485759999999999999, -20), 13);
    assert_almost_eq!(estimate_scaling_factor(10485760000000000000, -20), 13);
    assert_almost_eq!(estimate_scaling_factor(10485760000000000001, -20), 14);

    // extreme values:
    // 2^-1074 = 4.94065... * 10^-324
    // (2^53-1) * 2^971 = 1.79763... * 10^308
    assert_almost_eq!(estimate_scaling_factor(1, -1074), -323);
    assert_almost_eq!(estimate_scaling_factor(0x1fffffffffffff, 971), 309);

    // Miri is too slow
    let step = if cfg!(miri) { 37 } else { 1 };

    // Sweep the full f64 exponent range against the f64 log10 reference.
    for i in (-1074..972).step_by(step) {
        let expected = super::ldexp_f64(1.0, i).log10().ceil();
        assert_almost_eq!(estimate_scaling_factor(1, i as i16), expected as i16);
    }
}
--- /dev/null
+use std::mem::MaybeUninit;
+use std::{fmt, str};
+
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+use core::num::flt2dec::{round_up, Formatted, Part, Sign, MAX_SIG_DIGITS};
+use core::num::flt2dec::{
+ to_exact_exp_str, to_exact_fixed_str, to_shortest_exp_str, to_shortest_str,
+};
+
+pub use test::Bencher;
+
// Submodule layout mirrors the implementation: one test module per
// formatting strategy, plus the shared estimator and randomized tests.
mod estimator;
mod strategy {
    mod dragon;
    mod grisu;
}
mod random;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {:?} instead", full_decoded),
+ }
+}
+
// Asserts that a shortest-mode formatter `$f` produces exactly the digit
// buffer `$buf` with decimal exponent `$exp`.
//
// Two input shapes are accepted: `$f($v)` decodes the float expression `$v`
// via `decode_finite`, while `$f{field: value, ...}` builds a `Decoded`
// struct literal directly. Both forward to the corresponding internal arm
// that also takes a custom assertion-message format.
macro_rules! check_shortest {
    ($f:ident($v:expr) => $buf:expr, $exp:expr) => (
        check_shortest!($f($v) => $buf, $exp;
                        "shortest mismatch for v={v}: actual {actual:?}, expected {expected:?}",
                        v = stringify!($v))
    );

    ($f:ident{$($k:ident: $v:expr),+} => $buf:expr, $exp:expr) => (
        check_shortest!($f{$($k: $v),+} => $buf, $exp;
                        "shortest mismatch for {v:?}: actual {actual:?}, expected {expected:?}",
                        v = Decoded { $($k: $v),+ })
    );

    ($f:ident($v:expr) => $buf:expr, $exp:expr; $fmt:expr, $($key:ident = $val:expr),*) => ({
        // the buffer is prefilled with `_` so unwritten bytes are detectable.
        let mut buf = [MaybeUninit::new(b'_'); MAX_SIG_DIGITS];
        let (buf, k) = $f(&decode_finite($v), &mut buf);
        assert!((buf, k) == ($buf, $exp),
                $fmt, actual = (str::from_utf8(buf).unwrap(), k),
                expected = (str::from_utf8($buf).unwrap(), $exp),
                $($key = $val),*);
    });

    ($f:ident{$($k:ident: $v:expr),+} => $buf:expr, $exp:expr;
     $fmt:expr, $($key:ident = $val:expr),*) => ({
        let mut buf = [MaybeUninit::new(b'_'); MAX_SIG_DIGITS];
        let (buf, k) = $f(&Decoded { $($k: $v),+ }, &mut buf);
        assert!((buf, k) == ($buf, $exp),
                $fmt, actual = (str::from_utf8(buf).unwrap(), k),
                expected = (str::from_utf8($buf).unwrap(), $exp),
                $($key = $val),*);
    })
}
+
// Runs the exact-mode formatter `$f` with the output buffer truncated to the
// expected digit count and no position limit (`i16::MIN`), then asserts that
// the returned digits and exponent equal `$expected` / `$expectedk`.
macro_rules! try_exact {
    ($f:ident($decoded:expr) => $buf:expr, $expected:expr, $expectedk:expr;
     $fmt:expr, $($key:ident = $val:expr),*) => ({
        let (buf, k) = $f($decoded, &mut $buf[..$expected.len()], i16::MIN);
        assert!((buf, k) == ($expected, $expectedk),
                $fmt, actual = (str::from_utf8(buf).unwrap(), k),
                expected = (str::from_utf8($expected).unwrap(), $expectedk),
                $($key = $val),*);
    })
}
+
// Runs the fixed-mode formatter `$f`, passing `$request` through as the third
// argument (the digit-position limit — see the callers in `check_exact`),
// then asserts the returned digits and exponent.
macro_rules! try_fixed {
    ($f:ident($decoded:expr) => $buf:expr, $request:expr, $expected:expr, $expectedk:expr;
     $fmt:expr, $($key:ident = $val:expr),*) => ({
        let (buf, k) = $f($decoded, &mut $buf[..], $request);
        assert!((buf, k) == ($expected, $expectedk),
                $fmt, actual = (str::from_utf8(buf).unwrap(), k),
                expected = (str::from_utf8($expected).unwrap(), $expectedk),
                $($key = $val),*);
    })
}
+
+fn ldexp_f32(a: f32, b: i32) -> f32 {
+ ldexp_f64(a as f64, b) as f32
+}
+
/// `x * 2^exp`, delegated to the C library's `ldexp` for an exact result
/// across the whole f64 range (including subnormals and overflow to inf).
fn ldexp_f64(x: f64, exp: i32) -> f64 {
    extern "C" {
        fn ldexp(x: f64, n: i32) -> f64;
    }
    // SAFETY: `ldexp` is a pure libm function defined for every possible bit
    // pattern of its arguments, so the call cannot cause undefined behavior
    // as long as a conforming `ldexp` is linked in.
    unsafe { ldexp(x, exp) }
}
+
+fn check_exact<F, T>(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16)
+where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ // use a large enough buffer
+ let mut buf = [MaybeUninit::new(b'_'); 1024];
+ let mut expected_ = [b'_'; 1024];
+
+ let decoded = decode_finite(v);
+ let cut = expected.iter().position(|&c| c == b' ');
+
+ // check significant digits
+ for i in 1..cut.unwrap_or(expected.len() - 1) {
+ expected_[..i].copy_from_slice(&expected[..i]);
+ let mut expectedk_ = expectedk;
+ if expected[i] >= b'5' {
+ // check if this is a rounding-to-even case.
+ // we avoid rounding ...x5000... (with infinite zeroes) to ...(x+1) when x is even.
+ if !(i + 1 < expected.len()
+ && expected[i - 1] & 1 == 0
+ && expected[i] == b'5'
+ && expected[i + 1] == b' ')
+ {
+ // if this returns true, expected_[..i] is all `9`s and being rounded up.
+ // we should always return `100..00` (`i` digits) instead, since that's
+ // what we can came up with `i` digits anyway. `round_up` assumes that
+ // the adjustment to the length is done by caller, which we simply ignore.
+ if let Some(_) = round_up(&mut expected_[..i]) {
+ expectedk_ += 1;
+ }
+ }
+ }
+
+ try_exact!(f(&decoded) => &mut buf, &expected_[..i], expectedk_;
+ "exact sigdigit mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ try_fixed!(f(&decoded) => &mut buf, expectedk_ - i as i16, &expected_[..i], expectedk_;
+ "fixed sigdigit mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ }
+
+ // check exact rounding for zero- and negative-width cases
+ let start;
+ if expected[0] >= b'5' {
+ try_fixed!(f(&decoded) => &mut buf, expectedk, b"1", expectedk + 1;
+ "zero-width rounding-up mismatch for v={v}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr);
+ start = 1;
+ } else {
+ start = 0;
+ }
+ for i in start..-10 {
+ try_fixed!(f(&decoded) => &mut buf, expectedk - i, b"", expectedk;
+ "rounding-down mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = -i);
+ }
+
+ // check infinite zero digits
+ if let Some(cut) = cut {
+ for i in cut..expected.len() - 1 {
+ expected_[..cut].copy_from_slice(&expected[..cut]);
+ for c in &mut expected_[cut..i] {
+ *c = b'0';
+ }
+
+ try_exact!(f(&decoded) => &mut buf, &expected_[..i], expectedk;
+ "exact infzero mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ try_fixed!(f(&decoded) => &mut buf, expectedk - i as i16, &expected_[..i], expectedk;
+ "fixed infzero mismatch for v={v}, i={i}: \
+ actual {actual:?}, expected {expected:?}",
+ v = vstr, i = i);
+ }
+ }
+}
+
trait TestableFloat: DecodableFloat + fmt::Display {
    /// Returns `f * 2^exp`, similar to the C library `ldexp` but taking an
    /// integer mantissa. Only used for building test inputs.
    fn ldexpi(f: i64, exp: isize) -> Self;
}
+
+impl TestableFloat for f32 {
+ fn ldexpi(f: i64, exp: isize) -> Self {
+ f as Self * (exp as Self).exp2()
+ }
+}
+
+impl TestableFloat for f64 {
+ fn ldexpi(f: i64, exp: isize) -> Self {
+ f as Self * (exp as Self).exp2()
+ }
+}
+
/// Like `check_exact`, but takes the value as `x * 2^e` (built via `ldexpi`)
/// and checks only the full expected digit string, in both exact and fixed
/// modes, rather than every truncated prefix.
fn check_exact_one<F, T>(mut f: F, x: i64, e: isize, tstr: &str, expected: &[u8], expectedk: i16)
where
    T: TestableFloat,
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
    // use a large enough buffer
    let mut buf = [MaybeUninit::new(b'_'); 1024];
    let v: T = TestableFloat::ldexpi(x, e);
    let decoded = decode_finite(v);

    try_exact!(f(&decoded) => &mut buf, &expected, expectedk;
               "exact mismatch for v={x}p{e}{t}: actual {actual:?}, expected {expected:?}",
               x = x, e = e, t = tstr);
    try_fixed!(f(&decoded) => &mut buf, expectedk - expected.len() as i16, &expected, expectedk;
               "fixed mismatch for v={x}p{e}{t}: actual {actual:?}, expected {expected:?}",
               x = x, e = e, t = tstr);
}
+
// Convenience wrapper: `check_exact!(f(v) => digits, k)` stringifies `v` for
// failure messages and forwards to the `check_exact` function above.
macro_rules! check_exact {
    ($f:ident($v:expr) => $buf:expr, $exp:expr) => {
        check_exact(|d, b, k| $f(d, b, k), $v, stringify!($v), $buf, $exp)
    };
}
+
// Convenience wrapper: `check_exact_one!(f(x, e; T) => digits, k)` checks the
// value `x * 2^e` of float type `T`, forwarding to `check_exact_one`.
macro_rules! check_exact_one {
    ($f:ident($x:expr, $e:expr; $t:ty) => $buf:expr, $exp:expr) => {
        check_exact_one::<_, $t>(|d, b, k| $f(d, b, k), $x, $e, stringify!($t), $buf, $exp)
    };
}
+
+// in the following comments, three numbers are spaced by 1 ulp apart,
+// and the second one is being formatted.
+//
+// some tests are derived from [1].
+//
+// [1] Vern Paxson, A Program for Testing IEEE Decimal-Binary Conversion
+// ftp://ftp.ee.lbl.gov/testbase-report.ps.Z
+
/// Sanity-checks a shortest-mode formatter `f` against hand-verified f32
/// cases. Each comment block lists three adjacent representable values one
/// ULP apart; the middle one is the value being formatted.
pub fn f32_shortest_sanity_test<F>(mut f: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    // 0.0999999940395355224609375
    // 0.100000001490116119384765625
    // 0.10000000894069671630859375
    check_shortest!(f(0.1f32) => b"1", 0);

    // 0.333333313465118408203125
    // 0.3333333432674407958984375 (1/3 in the default rounding)
    // 0.33333337306976318359375
    check_shortest!(f(1.0f32/3.0) => b"33333334", 0);

    // 10^1 * 0.31415917873382568359375
    // 10^1 * 0.31415920257568359375
    // 10^1 * 0.31415922641754150390625
    check_shortest!(f(3.141592f32) => b"3141592", 1);

    // 10^18 * 0.31415916243714048
    // 10^18 * 0.314159196796878848
    // 10^18 * 0.314159231156617216
    check_shortest!(f(3.141592e17f32) => b"3141592", 18);

    // regression test for decoders
    // 10^8 * 0.3355443
    // 10^8 * 0.33554432
    // 10^8 * 0.33554436
    check_shortest!(f(ldexp_f32(1.0, 25)) => b"33554432", 8);

    // 10^39 * 0.340282326356119256160033759537265639424
    // 10^39 * 0.34028234663852885981170418348451692544
    // 10^39 * 0.340282366920938463463374607431768211456
    check_shortest!(f(f32::MAX) => b"34028235", 39);

    // 10^-37 * 0.1175494210692441075487029444849287348827...
    // 10^-37 * 0.1175494350822287507968736537222245677818...
    // 10^-37 * 0.1175494490952133940450443629595204006810...
    check_shortest!(f(f32::MIN_POSITIVE) => b"11754944", -37);

    // 10^-44 * 0
    // 10^-44 * 0.1401298464324817070923729583289916131280...
    // 10^-44 * 0.2802596928649634141847459166579832262560...
    let minf32 = ldexp_f32(1.0, -149);
    check_shortest!(f(minf32) => b"1", -44);
}
+
/// Sanity-checks an exact-mode formatter `f` against full decimal expansions
/// of selected f32 values, plus the Paxson stress inputs (see [1] above).
/// A trailing space in the expected digits marks "only zeroes follow".
pub fn f32_exact_sanity_test<F>(mut f: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
    let minf32 = ldexp_f32(1.0, -149);

    check_exact!(f(0.1f32) => b"100000001490116119384765625 ", 0);
    check_exact!(f(0.5f32) => b"5 ", 0);
    check_exact!(f(1.0f32/3.0) => b"3333333432674407958984375 ", 0);
    check_exact!(f(3.141592f32) => b"31415920257568359375 ", 1);
    check_exact!(f(3.141592e17f32) => b"314159196796878848 ", 18);
    check_exact!(f(f32::MAX) => b"34028234663852885981170418348451692544 ", 39);
    check_exact!(f(f32::MIN_POSITIVE) => b"1175494350822287507968736537222245677818", -37);
    check_exact!(f(minf32) => b"1401298464324817070923729583289916131280", -44);

    // [1], Table 16: Stress Inputs for Converting 24-bit Binary to Decimal, < 1/2 ULP
    check_exact_one!(f(12676506, -102; f32) => b"2", -23);
    check_exact_one!(f(12676506, -103; f32) => b"12", -23);
    check_exact_one!(f(15445013, 86; f32) => b"119", 34);
    check_exact_one!(f(13734123, -138; f32) => b"3941", -34);
    check_exact_one!(f(12428269, -130; f32) => b"91308", -32);
    check_exact_one!(f(15334037, -146; f32) => b"171900", -36);
    check_exact_one!(f(11518287, -41; f32) => b"5237910", -5);
    check_exact_one!(f(12584953, -145; f32) => b"28216440", -36);
    check_exact_one!(f(15961084, -125; f32) => b"375243281", -30);
    check_exact_one!(f(14915817, -146; f32) => b"1672120916", -36);
    check_exact_one!(f(10845484, -102; f32) => b"21388945814", -23);
    check_exact_one!(f(16431059, -61; f32) => b"712583594561", -11);

    // [1], Table 17: Stress Inputs for Converting 24-bit Binary to Decimal, > 1/2 ULP
    check_exact_one!(f(16093626, 69; f32) => b"1", 29);
    check_exact_one!(f( 9983778, 25; f32) => b"34", 15);
    check_exact_one!(f(12745034, 104; f32) => b"259", 39);
    check_exact_one!(f(12706553, 72; f32) => b"6001", 29);
    check_exact_one!(f(11005028, 45; f32) => b"38721", 21);
    check_exact_one!(f(15059547, 71; f32) => b"355584", 29);
    check_exact_one!(f(16015691, -99; f32) => b"2526831", -22);
    check_exact_one!(f( 8667859, 56; f32) => b"62458507", 24);
    check_exact_one!(f(14855922, -82; f32) => b"307213267", -17);
    check_exact_one!(f(14855922, -83; f32) => b"1536066333", -17);
    check_exact_one!(f(10144164, -110; f32) => b"78147796834", -26);
    check_exact_one!(f(13248074, 95; f32) => b"524810279937", 36);
}
+
/// Sanity-checks a shortest-mode formatter `f` against hand-verified f64
/// cases. Each comment block lists three adjacent representable values one
/// ULP apart; the middle one is the value being formatted.
pub fn f64_shortest_sanity_test<F>(mut f: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    // 0.0999999999999999777955395074968691915273...
    // 0.1000000000000000055511151231257827021181...
    // 0.1000000000000000333066907387546962127089...
    check_shortest!(f(0.1f64) => b"1", 0);

    // this example is explicitly mentioned in the paper.
    // 10^3 * 0.0999999999999999857891452847979962825775...
    // 10^3 * 0.1 (exact)
    // 10^3 * 0.1000000000000000142108547152020037174224...
    check_shortest!(f(100.0f64) => b"1", 3);

    // 0.3333333333333332593184650249895639717578...
    // 0.3333333333333333148296162562473909929394... (1/3 in the default rounding)
    // 0.3333333333333333703407674875052180141210...
    check_shortest!(f(1.0f64/3.0) => b"3333333333333333", 0);

    // explicit test case for equally closest representations.
    // Dragon has its own tie-breaking rule; Grisu should fall back.
    // 10^1 * 0.1000007629394531027955395074968691915273...
    // 10^1 * 0.100000762939453125 (exact)
    // 10^1 * 0.1000007629394531472044604925031308084726...
    check_shortest!(f(1.00000762939453125f64) => b"10000076293945313", 1);

    // 10^1 * 0.3141591999999999718085064159822650253772...
    // 10^1 * 0.3141592000000000162174274009885266423225...
    // 10^1 * 0.3141592000000000606263483859947882592678...
    check_shortest!(f(3.141592f64) => b"3141592", 1);

    // 10^18 * 0.314159199999999936
    // 10^18 * 0.3141592 (exact)
    // 10^18 * 0.314159200000000064
    check_shortest!(f(3.141592e17f64) => b"3141592", 18);

    // regression test for decoders
    // 10^20 * 0.18446744073709549568
    // 10^20 * 0.18446744073709551616
    // 10^20 * 0.18446744073709555712
    check_shortest!(f(ldexp_f64(1.0, 64)) => b"18446744073709552", 20);

    // pathological case: high = 10^23 (exact). tie breaking should always prefer that.
    // 10^24 * 0.099999999999999974834176
    // 10^24 * 0.099999999999999991611392
    // 10^24 * 0.100000000000000008388608
    check_shortest!(f(1.0e23f64) => b"1", 24);

    // 10^309 * 0.1797693134862315508561243283845062402343...
    // 10^309 * 0.1797693134862315708145274237317043567980...
    // 10^309 * 0.1797693134862315907729305190789024733617...
    check_shortest!(f(f64::MAX) => b"17976931348623157", 309);

    // 10^-307 * 0.2225073858507200889024586876085859887650...
    // 10^-307 * 0.2225073858507201383090232717332404064219...
    // 10^-307 * 0.2225073858507201877155878558578948240788...
    check_shortest!(f(f64::MIN_POSITIVE) => b"22250738585072014", -307);

    // 10^-323 * 0
    // 10^-323 * 0.4940656458412465441765687928682213723650...
    // 10^-323 * 0.9881312916824930883531375857364427447301...
    let minf64 = ldexp_f64(1.0, -1074);
    check_shortest!(f(minf64) => b"5", -323);
}
+
/// Sanity-checks an exact-mode formatter `f` against full decimal expansions
/// of selected f64 values (including the 751-digit expansion of the smallest
/// subnormal), plus the Paxson stress inputs (see [1] above). A trailing
/// space in the expected digits marks "only zeroes follow".
pub fn f64_exact_sanity_test<F>(mut f: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
    let minf64 = ldexp_f64(1.0, -1074);

    check_exact!(f(0.1f64) => b"1000000000000000055511151231257827021181", 0);
    check_exact!(f(0.45f64) => b"4500000000000000111022302462515654042363", 0);
    check_exact!(f(0.5f64) => b"5 ", 0);
    check_exact!(f(0.95f64) => b"9499999999999999555910790149937383830547", 0);
    check_exact!(f(100.0f64) => b"1 ", 3);
    check_exact!(f(999.5f64) => b"9995000000000000000000000000000000000000", 3);
    check_exact!(f(1.0f64/3.0) => b"3333333333333333148296162562473909929394", 0);
    check_exact!(f(3.141592f64) => b"3141592000000000162174274009885266423225", 1);
    check_exact!(f(3.141592e17f64) => b"3141592 ", 18);
    check_exact!(f(1.0e23f64) => b"99999999999999991611392 ", 23);
    check_exact!(f(f64::MAX) => b"1797693134862315708145274237317043567980", 309);
    check_exact!(f(f64::MIN_POSITIVE) => b"2225073858507201383090232717332404064219", -307);
    check_exact!(f(minf64) => b"4940656458412465441765687928682213723650\
                               5980261432476442558568250067550727020875\
                               1865299836361635992379796564695445717730\
                               9266567103559397963987747960107818781263\
                               0071319031140452784581716784898210368871\
                               8636056998730723050006387409153564984387\
                               3124733972731696151400317153853980741262\
                               3856559117102665855668676818703956031062\
                               4931945271591492455329305456544401127480\
                               1297099995419319894090804165633245247571\
                               4786901472678015935523861155013480352649\
                               3472019379026810710749170333222684475333\
                               5720832431936092382893458368060106011506\
                               1698097530783422773183292479049825247307\
                               7637592724787465608477820373446969953364\
                               7017972677717585125660551199131504891101\
                               4510378627381672509558373897335989936648\
                               0994116420570263709027924276754456522908\
                               7538682506419718265533447265625 ", -323);

    // [1], Table 3: Stress Inputs for Converting 53-bit Binary to Decimal, < 1/2 ULP
    check_exact_one!(f(8511030020275656, -342; f64) => b"9", -87);
    check_exact_one!(f(5201988407066741, -824; f64) => b"46", -232);
    check_exact_one!(f(6406892948269899, 237; f64) => b"141", 88);
    check_exact_one!(f(8431154198732492, 72; f64) => b"3981", 38);
    check_exact_one!(f(6475049196144587, 99; f64) => b"41040", 46);
    check_exact_one!(f(8274307542972842, 726; f64) => b"292084", 235);
    check_exact_one!(f(5381065484265332, -456; f64) => b"2891946", -121);
    check_exact_one!(f(6761728585499734, -1057; f64) => b"43787718", -302);
    check_exact_one!(f(7976538478610756, 376; f64) => b"122770163", 130);
    check_exact_one!(f(5982403858958067, 377; f64) => b"1841552452", 130);
    check_exact_one!(f(5536995190630837, 93; f64) => b"54835744350", 44);
    check_exact_one!(f(7225450889282194, 710; f64) => b"389190181146", 230);
    check_exact_one!(f(7225450889282194, 709; f64) => b"1945950905732", 230);
    check_exact_one!(f(8703372741147379, 117; f64) => b"14460958381605", 52);
    check_exact_one!(f(8944262675275217, -1001; f64) => b"417367747458531", -285);
    check_exact_one!(f(7459803696087692, -707; f64) => b"1107950772878888", -196);
    check_exact_one!(f(6080469016670379, -381; f64) => b"12345501366327440", -98);
    check_exact_one!(f(8385515147034757, 721; f64) => b"925031711960365024", 233);
    check_exact_one!(f(7514216811389786, -828; f64) => b"4198047150284889840", -233);
    check_exact_one!(f(8397297803260511, -345; f64) => b"11716315319786511046", -87);
    check_exact_one!(f(6733459239310543, 202; f64) => b"432810072844612493629", 77);
    check_exact_one!(f(8091450587292794, -473; f64) => b"3317710118160031081518", -126);

    // [1], Table 4: Stress Inputs for Converting 53-bit Binary to Decimal, > 1/2 ULP
    check_exact_one!(f(6567258882077402, 952; f64) => b"3", 303);
    check_exact_one!(f(6712731423444934, 535; f64) => b"76", 177);
    check_exact_one!(f(6712731423444934, 534; f64) => b"378", 177);
    check_exact_one!(f(5298405411573037, -957; f64) => b"4350", -272);
    check_exact_one!(f(5137311167659507, -144; f64) => b"23037", -27);
    check_exact_one!(f(6722280709661868, 363; f64) => b"126301", 126);
    check_exact_one!(f(5344436398034927, -169; f64) => b"7142211", -35);
    check_exact_one!(f(8369123604277281, -853; f64) => b"13934574", -240);
    check_exact_one!(f(8995822108487663, -780; f64) => b"141463449", -218);
    check_exact_one!(f(8942832835564782, -383; f64) => b"4539277920", -99);
    check_exact_one!(f(8942832835564782, -384; f64) => b"22696389598", -99);
    check_exact_one!(f(8942832835564782, -385; f64) => b"113481947988", -99);
    check_exact_one!(f(6965949469487146, -249; f64) => b"7700366561890", -59);
    check_exact_one!(f(6965949469487146, -250; f64) => b"38501832809448", -59);
    check_exact_one!(f(6965949469487146, -251; f64) => b"192509164047238", -59);
    check_exact_one!(f(7487252720986826, 548; f64) => b"6898586531774201", 181);
    check_exact_one!(f(5592117679628511, 164; f64) => b"13076622631878654", 66);
    check_exact_one!(f(8887055249355788, 665; f64) => b"136052020756121240", 217);
    check_exact_one!(f(6994187472632449, 690; f64) => b"3592810217475959676", 224);
    check_exact_one!(f(8797576579012143, 588; f64) => b"89125197712484551899", 193);
    check_exact_one!(f(7363326733505337, 272; f64) => b"558769757362301140950", 98);
    check_exact_one!(f(8549497411294502, -448; f64) => b"1176257830728540379990", -118);
}
+
/// Checks tie-breaking at the top of a decade with a hand-built `Decoded`:
/// whether the boundary is inclusive decides between collapsing to `1e18`
/// and keeping the full 17-digit expansion.
pub fn more_shortest_sanity_test<F>(mut f: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    check_shortest!(f{mant: 99_999_999_999_999_999, minus: 1, plus: 1,
                      exp: 0, inclusive: true} => b"1", 18);
    check_shortest!(f{mant: 99_999_999_999_999_999, minus: 1, plus: 1,
                      exp: 0, inclusive: false} => b"99999999999999999", 17);
}
+
+fn to_string_with_parts<F>(mut f: F) -> String
+where
+ F: for<'a> FnMut(&'a mut [MaybeUninit<u8>], &'a mut [MaybeUninit<Part<'a>>]) -> Formatted<'a>,
+{
+ let mut buf = [MaybeUninit::new(0); 1024];
+ let mut parts = [MaybeUninit::new(Part::Zero(0)); 16];
+ let formatted = f(&mut buf, &mut parts);
+ let mut ret = vec![0; formatted.len()];
+ assert_eq!(formatted.write(&mut ret), Some(ret.len()));
+ String::from_utf8(ret).unwrap()
+}
+
/// End-to-end test of `to_shortest_str` (shortest digits, fixed-point
/// notation) driven by the digit generator `f_`: covers sign modes, signed
/// zeroes, non-finite values, `frac_digits` padding, and extreme magnitudes.
pub fn to_shortest_str_test<F>(mut f_: F)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
    use core::num::flt2dec::Sign::*;

    fn to_string<T, F>(f: &mut F, v: T, sign: Sign, frac_digits: usize) -> String
    where
        T: DecodableFloat,
        F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
    {
        to_string_with_parts(|buf, parts| {
            to_shortest_str(|d, b| f(d, b), v, sign, frac_digits, buf, parts)
        })
    }

    let f = &mut f_;

    // signed zero in every sign mode, with and without forced fraction digits
    assert_eq!(to_string(f, 0.0, Minus, 0), "0");
    assert_eq!(to_string(f, 0.0, MinusRaw, 0), "0");
    assert_eq!(to_string(f, 0.0, MinusPlus, 0), "+0");
    assert_eq!(to_string(f, 0.0, MinusPlusRaw, 0), "+0");
    assert_eq!(to_string(f, -0.0, Minus, 0), "0");
    assert_eq!(to_string(f, -0.0, MinusRaw, 0), "-0");
    assert_eq!(to_string(f, -0.0, MinusPlus, 0), "+0");
    assert_eq!(to_string(f, -0.0, MinusPlusRaw, 0), "-0");
    assert_eq!(to_string(f, 0.0, Minus, 1), "0.0");
    assert_eq!(to_string(f, 0.0, MinusRaw, 1), "0.0");
    assert_eq!(to_string(f, 0.0, MinusPlus, 1), "+0.0");
    assert_eq!(to_string(f, 0.0, MinusPlusRaw, 1), "+0.0");
    assert_eq!(to_string(f, -0.0, Minus, 8), "0.00000000");
    assert_eq!(to_string(f, -0.0, MinusRaw, 8), "-0.00000000");
    assert_eq!(to_string(f, -0.0, MinusPlus, 8), "+0.00000000");
    assert_eq!(to_string(f, -0.0, MinusPlusRaw, 8), "-0.00000000");

    // non-finite values ignore frac_digits
    assert_eq!(to_string(f, 1.0 / 0.0, Minus, 0), "inf");
    assert_eq!(to_string(f, 1.0 / 0.0, MinusRaw, 0), "inf");
    assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 0), "+inf");
    assert_eq!(to_string(f, 1.0 / 0.0, MinusPlusRaw, 0), "+inf");
    assert_eq!(to_string(f, 0.0 / 0.0, Minus, 0), "NaN");
    assert_eq!(to_string(f, 0.0 / 0.0, MinusRaw, 1), "NaN");
    assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 8), "NaN");
    assert_eq!(to_string(f, 0.0 / 0.0, MinusPlusRaw, 64), "NaN");
    assert_eq!(to_string(f, -1.0 / 0.0, Minus, 0), "-inf");
    assert_eq!(to_string(f, -1.0 / 0.0, MinusRaw, 1), "-inf");
    assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 8), "-inf");
    assert_eq!(to_string(f, -1.0 / 0.0, MinusPlusRaw, 64), "-inf");

    // ordinary values; frac_digits only pads, never truncates
    assert_eq!(to_string(f, 3.14, Minus, 0), "3.14");
    assert_eq!(to_string(f, 3.14, MinusRaw, 0), "3.14");
    assert_eq!(to_string(f, 3.14, MinusPlus, 0), "+3.14");
    assert_eq!(to_string(f, 3.14, MinusPlusRaw, 0), "+3.14");
    assert_eq!(to_string(f, -3.14, Minus, 0), "-3.14");
    assert_eq!(to_string(f, -3.14, MinusRaw, 0), "-3.14");
    assert_eq!(to_string(f, -3.14, MinusPlus, 0), "-3.14");
    assert_eq!(to_string(f, -3.14, MinusPlusRaw, 0), "-3.14");
    assert_eq!(to_string(f, 3.14, Minus, 1), "3.14");
    assert_eq!(to_string(f, 3.14, MinusRaw, 2), "3.14");
    assert_eq!(to_string(f, 3.14, MinusPlus, 3), "+3.140");
    assert_eq!(to_string(f, 3.14, MinusPlusRaw, 4), "+3.1400");
    assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
    assert_eq!(to_string(f, -3.14, MinusRaw, 8), "-3.14000000");
    assert_eq!(to_string(f, -3.14, MinusPlus, 8), "-3.14000000");
    assert_eq!(to_string(f, -3.14, MinusPlusRaw, 8), "-3.14000000");

    assert_eq!(to_string(f, 7.5e-11, Minus, 0), "0.000000000075");
    assert_eq!(to_string(f, 7.5e-11, Minus, 3), "0.000000000075");
    assert_eq!(to_string(f, 7.5e-11, Minus, 12), "0.000000000075");
    assert_eq!(to_string(f, 7.5e-11, Minus, 13), "0.0000000000750");

    assert_eq!(to_string(f, 1.9971e20, Minus, 0), "199710000000000000000");
    assert_eq!(to_string(f, 1.9971e20, Minus, 1), "199710000000000000000.0");
    assert_eq!(to_string(f, 1.9971e20, Minus, 8), "199710000000000000000.00000000");

    // extreme magnitudes
    assert_eq!(to_string(f, f32::MAX, Minus, 0), format!("34028235{:0>31}", ""));
    assert_eq!(to_string(f, f32::MAX, Minus, 1), format!("34028235{:0>31}.0", ""));
    assert_eq!(to_string(f, f32::MAX, Minus, 8), format!("34028235{:0>31}.00000000", ""));

    let minf32 = ldexp_f32(1.0, -149);
    assert_eq!(to_string(f, minf32, Minus, 0), format!("0.{:0>44}1", ""));
    assert_eq!(to_string(f, minf32, Minus, 45), format!("0.{:0>44}1", ""));
    assert_eq!(to_string(f, minf32, Minus, 46), format!("0.{:0>44}10", ""));

    assert_eq!(to_string(f, f64::MAX, Minus, 0), format!("17976931348623157{:0>292}", ""));
    assert_eq!(to_string(f, f64::MAX, Minus, 1), format!("17976931348623157{:0>292}.0", ""));
    assert_eq!(to_string(f, f64::MAX, Minus, 8), format!("17976931348623157{:0>292}.00000000", ""));

    let minf64 = ldexp_f64(1.0, -1074);
    assert_eq!(to_string(f, minf64, Minus, 0), format!("0.{:0>323}5", ""));
    assert_eq!(to_string(f, minf64, Minus, 324), format!("0.{:0>323}5", ""));
    assert_eq!(to_string(f, minf64, Minus, 325), format!("0.{:0>323}50", ""));

    if cfg!(miri) {
        // Miri is too slow
        return;
    }

    // very large output
    assert_eq!(to_string(f, 1.1, Minus, 80000), format!("1.1{:0>79999}", ""));
}
+
+/// Exercises `to_shortest_exp_str` against a shortest-digits formatter `f_`.
+///
+/// Each call formats a value with an `exp_bounds = (lo, hi)` window and an
+/// `upper` flag: as the asserts below demonstrate, a value whose decimal
+/// exponent falls inside the window is rendered in plain fixed notation,
+/// while one outside the window gets exponential notation with `E` (when
+/// `upper` is true) or `e` (when false).
+pub fn to_shortest_exp_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ // Local helper: run the formatter through `to_string_with_parts` so the
+ // rendered `Part`s are collapsed into an owned `String` we can compare.
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, exp_bounds: (i16, i16), upper: bool) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_shortest_exp_str(|d, b| f(d, b), v, sign, exp_bounds, upper, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
+ // Zero: the `Raw` sign modes preserve the sign bit of -0.0; the others
+ // normalize it to an unsigned (or `+`-prefixed) zero.
+ assert_eq!(to_string(f, 0.0, Minus, (-4, 16), false), "0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, (-4, 16), false), "0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, (-4, 16), false), "+0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, (-4, 16), false), "+0");
+ assert_eq!(to_string(f, -0.0, Minus, (-4, 16), false), "0");
+ assert_eq!(to_string(f, -0.0, MinusRaw, (-4, 16), false), "-0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, (-4, 16), false), "+0");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, (-4, 16), false), "-0");
+ // Zero outside the fixed-notation window always renders as exponent 0.
+ assert_eq!(to_string(f, 0.0, Minus, (0, 0), true), "0E0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, (0, 0), false), "0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, (-9, -5), true), "+0E0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, (5, 9), false), "+0e0");
+ assert_eq!(to_string(f, -0.0, Minus, (0, 0), true), "0E0");
+ assert_eq!(to_string(f, -0.0, MinusRaw, (0, 0), false), "-0e0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, (-9, -5), true), "+0E0");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, (5, 9), false), "-0e0");
+
+ // Non-finite values ignore `exp_bounds` and `upper` entirely.
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, (-4, 16), false), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusRaw, (-4, 16), true), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, (-4, 16), false), "+inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlusRaw, (-4, 16), true), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, (0, 0), false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusRaw, (0, 0), true), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, (-9, -5), false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlusRaw, (5, 9), true), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, (0, 0), false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusRaw, (0, 0), true), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, (-9, -5), false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlusRaw, (5, 9), true), "-inf");
+
+ // Exponent 0 sits inside (-4, 16): fixed notation.
+ assert_eq!(to_string(f, 3.14, Minus, (-4, 16), false), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusRaw, (-4, 16), false), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, (-4, 16), false), "+3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, (-4, 16), false), "+3.14");
+ assert_eq!(to_string(f, -3.14, Minus, (-4, 16), false), "-3.14");
+ assert_eq!(to_string(f, -3.14, MinusRaw, (-4, 16), false), "-3.14");
+ assert_eq!(to_string(f, -3.14, MinusPlus, (-4, 16), false), "-3.14");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, (-4, 16), false), "-3.14");
+ // Same values with a window that excludes exponent 0: exponential form.
+ assert_eq!(to_string(f, 3.14, Minus, (0, 0), true), "3.14E0");
+ assert_eq!(to_string(f, 3.14, MinusRaw, (0, 0), false), "3.14e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, (-9, -5), true), "+3.14E0");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, (5, 9), false), "+3.14e0");
+ assert_eq!(to_string(f, -3.14, Minus, (0, 0), true), "-3.14E0");
+ assert_eq!(to_string(f, -3.14, MinusRaw, (0, 0), false), "-3.14e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, (-9, -5), true), "-3.14E0");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, (5, 9), false), "-3.14e0");
+
+ assert_eq!(to_string(f, 0.1, Minus, (-4, 16), false), "0.1");
+ assert_eq!(to_string(f, 0.1, MinusRaw, (-4, 16), false), "0.1");
+ assert_eq!(to_string(f, 0.1, MinusPlus, (-4, 16), false), "+0.1");
+ assert_eq!(to_string(f, 0.1, MinusPlusRaw, (-4, 16), false), "+0.1");
+ assert_eq!(to_string(f, -0.1, Minus, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, -0.1, MinusRaw, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, -0.1, MinusPlus, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, -0.1, MinusPlusRaw, (-4, 16), false), "-0.1");
+ assert_eq!(to_string(f, 0.1, Minus, (0, 0), true), "1E-1");
+ assert_eq!(to_string(f, 0.1, MinusRaw, (0, 0), false), "1e-1");
+ assert_eq!(to_string(f, 0.1, MinusPlus, (-9, -5), true), "+1E-1");
+ assert_eq!(to_string(f, 0.1, MinusPlusRaw, (5, 9), false), "+1e-1");
+ assert_eq!(to_string(f, -0.1, Minus, (0, 0), true), "-1E-1");
+ assert_eq!(to_string(f, -0.1, MinusRaw, (0, 0), false), "-1e-1");
+ assert_eq!(to_string(f, -0.1, MinusPlus, (-9, -5), true), "-1E-1");
+ assert_eq!(to_string(f, -0.1, MinusPlusRaw, (5, 9), false), "-1e-1");
+
+ // Boundary behavior of `exp_bounds`: -11 inside (-11, 10) gives fixed
+ // notation, but (-10, 11) excludes it and falls back to exponential.
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-4, 16), false), "7.5e-11");
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-11, 10), false), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, (-10, 11), false), "7.5e-11");
+
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-4, 16), false), "1.9971e20");
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-20, 21), false), "199710000000000000000");
+ assert_eq!(to_string(f, 1.9971e20, Minus, (-21, 20), false), "1.9971e20");
+
+ // the true value of 1.0e23f64 is less than 10^23, but that shouldn't matter here
+ assert_eq!(to_string(f, 1.0e23, Minus, (22, 23), false), "1e23");
+ assert_eq!(to_string(f, 1.0e23, Minus, (23, 24), false), "100000000000000000000000");
+ assert_eq!(to_string(f, 1.0e23, Minus, (24, 25), false), "1e23");
+
+ // Extremes of f32: largest finite and smallest (subnormal) positive value.
+ assert_eq!(to_string(f, f32::MAX, Minus, (-4, 16), false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, (-39, 38), false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, (-38, 39), false), format!("34028235{:0>31}", ""));
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, (-4, 16), false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, (-44, 45), false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, (-45, 44), false), format!("0.{:0>44}1", ""));
+
+ // Extremes of f64, same pattern.
+ assert_eq!(to_string(f, f64::MAX, Minus, (-4, 16), false), "1.7976931348623157e308");
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, (-308, 309), false),
+ format!("17976931348623157{:0>292}", "")
+ );
+ assert_eq!(to_string(f, f64::MAX, Minus, (-309, 308), false), "1.7976931348623157e308");
+
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, (-4, 16), false), "5e-324");
+ assert_eq!(to_string(f, minf64, Minus, (-324, 323), false), format!("0.{:0>323}5", ""));
+ assert_eq!(to_string(f, minf64, Minus, (-323, 324), false), "5e-324");
+
+ // The widest possible window must still work without overflow.
+ assert_eq!(to_string(f, 1.1, Minus, (i16::MIN, i16::MAX), false), "1.1");
+}
+
+/// Exercises `to_exact_exp_str` against an exact-digits formatter `f_`.
+///
+/// `ndigits` is the requested number of significant digits (so the output
+/// always has exactly `ndigits` digits, rounding or zero-padding as needed),
+/// and `upper` selects `E` vs `e` for the exponent marker.
+pub fn to_exact_exp_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ // Local helper: format `v` and collapse the rendered `Part`s to a `String`.
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, ndigits: usize, upper: bool) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_exact_exp_str(|d, b, l| f(d, b, l), v, sign, ndigits, upper, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
+ // Zero: padded with zero fraction digits up to `ndigits`; the `Raw` sign
+ // modes preserve the sign bit of -0.0.
+ assert_eq!(to_string(f, 0.0, Minus, 1, true), "0E0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, 1, false), "0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 1, true), "+0E0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, 1, false), "+0e0");
+ assert_eq!(to_string(f, -0.0, Minus, 1, true), "0E0");
+ assert_eq!(to_string(f, -0.0, MinusRaw, 1, false), "-0e0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 1, true), "+0E0");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, 1, false), "-0e0");
+ assert_eq!(to_string(f, 0.0, Minus, 2, true), "0.0E0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, 2, false), "0.0e0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 2, true), "+0.0E0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, 2, false), "+0.0e0");
+ assert_eq!(to_string(f, -0.0, Minus, 8, true), "0.0000000E0");
+ assert_eq!(to_string(f, -0.0, MinusRaw, 8, false), "-0.0000000e0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 8, true), "+0.0000000E0");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, 8, false), "-0.0000000e0");
+
+ // Non-finite values ignore `ndigits` and `upper`.
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 1, false), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusRaw, 1, true), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 1, false), "+inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlusRaw, 1, true), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 8, false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusRaw, 8, true), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 8, false), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlusRaw, 8, true), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 64, false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusRaw, 64, true), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 64, false), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlusRaw, 64, true), "-inf");
+
+ // `ndigits` truncates/rounds or zero-pads the significand as needed.
+ assert_eq!(to_string(f, 3.14, Minus, 1, true), "3E0");
+ assert_eq!(to_string(f, 3.14, MinusRaw, 1, false), "3e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 1, true), "+3E0");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, 1, false), "+3e0");
+ assert_eq!(to_string(f, -3.14, Minus, 2, true), "-3.1E0");
+ assert_eq!(to_string(f, -3.14, MinusRaw, 2, false), "-3.1e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 2, true), "-3.1E0");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, 2, false), "-3.1e0");
+ assert_eq!(to_string(f, 3.14, Minus, 3, true), "3.14E0");
+ assert_eq!(to_string(f, 3.14, MinusRaw, 3, false), "3.14e0");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 3, true), "+3.14E0");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, 3, false), "+3.14e0");
+ assert_eq!(to_string(f, -3.14, Minus, 4, true), "-3.140E0");
+ assert_eq!(to_string(f, -3.14, MinusRaw, 4, false), "-3.140e0");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 4, true), "-3.140E0");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, 4, false), "-3.140e0");
+
+ // 0.195 rounds up to "2e-1" at one digit; exact at three.
+ assert_eq!(to_string(f, 0.195, Minus, 1, false), "2e-1");
+ assert_eq!(to_string(f, 0.195, MinusRaw, 1, true), "2E-1");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 1, false), "+2e-1");
+ assert_eq!(to_string(f, 0.195, MinusPlusRaw, 1, true), "+2E-1");
+ assert_eq!(to_string(f, -0.195, Minus, 2, false), "-2.0e-1");
+ assert_eq!(to_string(f, -0.195, MinusRaw, 2, true), "-2.0E-1");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 2, false), "-2.0e-1");
+ assert_eq!(to_string(f, -0.195, MinusPlusRaw, 2, true), "-2.0E-1");
+ assert_eq!(to_string(f, 0.195, Minus, 3, false), "1.95e-1");
+ assert_eq!(to_string(f, 0.195, MinusRaw, 3, true), "1.95E-1");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 3, false), "+1.95e-1");
+ assert_eq!(to_string(f, 0.195, MinusPlusRaw, 3, true), "+1.95E-1");
+ assert_eq!(to_string(f, -0.195, Minus, 4, false), "-1.950e-1");
+ assert_eq!(to_string(f, -0.195, MinusRaw, 4, true), "-1.950E-1");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 4, false), "-1.950e-1");
+ assert_eq!(to_string(f, -0.195, MinusPlusRaw, 4, true), "-1.950E-1");
+
+ // Rounding 9.5 at one digit carries into the exponent: "1e1".
+ assert_eq!(to_string(f, 9.5, Minus, 1, false), "1e1");
+ assert_eq!(to_string(f, 9.5, Minus, 2, false), "9.5e0");
+ assert_eq!(to_string(f, 9.5, Minus, 3, false), "9.50e0");
+ assert_eq!(to_string(f, 9.5, Minus, 30, false), "9.50000000000000000000000000000e0");
+
+ // Past ~17 digits the exact binary value of 1.0e25 shows through.
+ assert_eq!(to_string(f, 1.0e25, Minus, 1, false), "1e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 2, false), "1.0e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 15, false), "1.00000000000000e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 16, false), "1.000000000000000e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 17, false), "1.0000000000000001e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 18, false), "1.00000000000000009e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 19, false), "1.000000000000000091e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 20, false), "1.0000000000000000906e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 21, false), "1.00000000000000009060e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 22, false), "1.000000000000000090597e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 23, false), "1.0000000000000000905970e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 24, false), "1.00000000000000009059697e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 25, false), "1.000000000000000090596966e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 26, false), "1.0000000000000000905969664e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 27, false), "1.00000000000000009059696640e25");
+ assert_eq!(to_string(f, 1.0e25, Minus, 30, false), "1.00000000000000009059696640000e25");
+
+ // Likewise for 1.0e-6, which is slightly below 10^-6 in binary.
+ assert_eq!(to_string(f, 1.0e-6, Minus, 1, false), "1e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 2, false), "1.0e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 16, false), "1.000000000000000e-6");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 17, false), "9.9999999999999995e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 18, false), "9.99999999999999955e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 19, false), "9.999999999999999547e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 20, false), "9.9999999999999995475e-7");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 30, false), "9.99999999999999954748111825886e-7");
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 40, false),
+ "9.999999999999999547481118258862586856139e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 50, false),
+ "9.9999999999999995474811182588625868561393872369081e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 60, false),
+ "9.99999999999999954748111825886258685613938723690807819366455e-7"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 70, false),
+ "9.999999999999999547481118258862586856139387236908078193664550781250000e-7"
+ );
+
+ // f32 extremes: largest finite value and smallest subnormal, expanded to
+ // their full decimal representation (trailing zeros once exhausted).
+ assert_eq!(to_string(f, f32::MAX, Minus, 1, false), "3e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 2, false), "3.4e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 4, false), "3.403e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 8, false), "3.4028235e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 16, false), "3.402823466385289e38");
+ assert_eq!(to_string(f, f32::MAX, Minus, 32, false), "3.4028234663852885981170418348452e38");
+ assert_eq!(
+ to_string(f, f32::MAX, Minus, 64, false),
+ "3.402823466385288598117041834845169254400000000000000000000000000e38"
+ );
+
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, 1, false), "1e-45");
+ assert_eq!(to_string(f, minf32, Minus, 2, false), "1.4e-45");
+ assert_eq!(to_string(f, minf32, Minus, 4, false), "1.401e-45");
+ assert_eq!(to_string(f, minf32, Minus, 8, false), "1.4012985e-45");
+ assert_eq!(to_string(f, minf32, Minus, 16, false), "1.401298464324817e-45");
+ assert_eq!(to_string(f, minf32, Minus, 32, false), "1.4012984643248170709237295832899e-45");
+ assert_eq!(
+ to_string(f, minf32, Minus, 64, false),
+ "1.401298464324817070923729583289916131280261941876515771757068284e-45"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 128, false),
+ "1.401298464324817070923729583289916131280261941876515771757068283\
+ 8897910826858606014866381883621215820312500000000000000000000000e-45"
+ );
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ // f64 extremes, up to 512/1024 exact digits.
+ assert_eq!(to_string(f, f64::MAX, Minus, 1, false), "2e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 2, false), "1.8e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 4, false), "1.798e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 8, false), "1.7976931e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 16, false), "1.797693134862316e308");
+ assert_eq!(to_string(f, f64::MAX, Minus, 32, false), "1.7976931348623157081452742373170e308");
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 64, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 128, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432133e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 256, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978e308"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 512, false),
+ "1.797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 2620414472316873817718091929988125040402618412485836800000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000e308"
+ );
+
+ // okay, this is becoming tough. fortunately for us, this is almost the worst case.
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, 1, false), "5e-324");
+ assert_eq!(to_string(f, minf64, Minus, 2, false), "4.9e-324");
+ assert_eq!(to_string(f, minf64, Minus, 4, false), "4.941e-324");
+ assert_eq!(to_string(f, minf64, Minus, 8, false), "4.9406565e-324");
+ assert_eq!(to_string(f, minf64, Minus, 16, false), "4.940656458412465e-324");
+ assert_eq!(to_string(f, minf64, Minus, 32, false), "4.9406564584124654417656879286822e-324");
+ assert_eq!(
+ to_string(f, minf64, Minus, 64, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 128, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 256, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 512, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696\
+ 1514003171538539807412623856559117102665855668676818703956031062\
+ 4931945271591492455329305456544401127480129709999541931989409080\
+ 4165633245247571478690147267801593552386115501348035264934720193\
+ 7902681071074917033322268447533357208324319360923828934583680601e-324"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 1024, false),
+ "4.940656458412465441765687928682213723650598026143247644255856825\
+ 0067550727020875186529983636163599237979656469544571773092665671\
+ 0355939796398774796010781878126300713190311404527845817167848982\
+ 1036887186360569987307230500063874091535649843873124733972731696\
+ 1514003171538539807412623856559117102665855668676818703956031062\
+ 4931945271591492455329305456544401127480129709999541931989409080\
+ 4165633245247571478690147267801593552386115501348035264934720193\
+ 7902681071074917033322268447533357208324319360923828934583680601\
+ 0601150616980975307834227731832924790498252473077637592724787465\
+ 6084778203734469699533647017972677717585125660551199131504891101\
+ 4510378627381672509558373897335989936648099411642057026370902792\
+ 4276754456522908753868250641971826553344726562500000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000e-324"
+ );
+
+ // very large output
+ assert_eq!(to_string(f, 0.0, Minus, 80000, false), format!("0.{:0>79999}e0", ""));
+ assert_eq!(to_string(f, 1.0e1, Minus, 80000, false), format!("1.{:0>79999}e1", ""));
+ assert_eq!(to_string(f, 1.0e0, Minus, 80000, false), format!("1.{:0>79999}e0", ""));
+ assert_eq!(
+ to_string(f, 1.0e-1, Minus, 80000, false),
+ format!(
+ "1.000000000000000055511151231257827021181583404541015625{:0>79945}\
+ e-1",
+ ""
+ )
+ );
+ assert_eq!(
+ to_string(f, 1.0e-20, Minus, 80000, false),
+ format!(
+ "9.999999999999999451532714542095716517295037027873924471077157760\
+ 66783064379706047475337982177734375{:0>79901}e-21",
+ ""
+ )
+ );
+}
+
+/// Exercises `to_exact_fixed_str` against an exact-digits formatter `f_`.
+///
+/// `frac_digits` is the exact number of digits requested after the decimal
+/// point (0 means no fractional part at all); the integral part is always
+/// printed in full, with rounding applied at the last fractional digit.
+pub fn to_exact_fixed_str_test<F>(mut f_: F)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+{
+ use core::num::flt2dec::Sign::*;
+
+ // Local helper: format `v` and collapse the rendered `Part`s to a `String`.
+ fn to_string<T, F>(f: &mut F, v: T, sign: Sign, frac_digits: usize) -> String
+ where
+ T: DecodableFloat,
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
+ {
+ to_string_with_parts(|buf, parts| {
+ to_exact_fixed_str(|d, b, l| f(d, b, l), v, sign, frac_digits, buf, parts)
+ })
+ }
+
+ let f = &mut f_;
+
+ // Zero: the `Raw` sign modes preserve the sign bit of -0.0.
+ assert_eq!(to_string(f, 0.0, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, 0), "0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, 0), "+0");
+ assert_eq!(to_string(f, -0.0, Minus, 0), "0");
+ assert_eq!(to_string(f, -0.0, MinusRaw, 0), "-0");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, 0), "-0");
+ assert_eq!(to_string(f, 0.0, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0, MinusRaw, 1), "0.0");
+ assert_eq!(to_string(f, 0.0, MinusPlus, 1), "+0.0");
+ assert_eq!(to_string(f, 0.0, MinusPlusRaw, 1), "+0.0");
+ assert_eq!(to_string(f, -0.0, Minus, 8), "0.00000000");
+ assert_eq!(to_string(f, -0.0, MinusRaw, 8), "-0.00000000");
+ assert_eq!(to_string(f, -0.0, MinusPlus, 8), "+0.00000000");
+ assert_eq!(to_string(f, -0.0, MinusPlusRaw, 8), "-0.00000000");
+
+ // Non-finite values ignore `frac_digits`.
+ assert_eq!(to_string(f, 1.0 / 0.0, Minus, 0), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusRaw, 1), "inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlus, 8), "+inf");
+ assert_eq!(to_string(f, 1.0 / 0.0, MinusPlusRaw, 64), "+inf");
+ assert_eq!(to_string(f, 0.0 / 0.0, Minus, 0), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusRaw, 1), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlus, 8), "NaN");
+ assert_eq!(to_string(f, 0.0 / 0.0, MinusPlusRaw, 64), "NaN");
+ assert_eq!(to_string(f, -1.0 / 0.0, Minus, 0), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusRaw, 1), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlus, 8), "-inf");
+ assert_eq!(to_string(f, -1.0 / 0.0, MinusPlusRaw, 64), "-inf");
+
+ // Truncation at `frac_digits = 0`, rounding and zero-padding beyond.
+ assert_eq!(to_string(f, 3.14, Minus, 0), "3");
+ assert_eq!(to_string(f, 3.14, MinusRaw, 0), "3");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 0), "+3");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, 0), "+3");
+ assert_eq!(to_string(f, -3.14, Minus, 0), "-3");
+ assert_eq!(to_string(f, -3.14, MinusRaw, 0), "-3");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 0), "-3");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, 0), "-3");
+ assert_eq!(to_string(f, 3.14, Minus, 1), "3.1");
+ assert_eq!(to_string(f, 3.14, MinusRaw, 2), "3.14");
+ assert_eq!(to_string(f, 3.14, MinusPlus, 3), "+3.140");
+ assert_eq!(to_string(f, 3.14, MinusPlusRaw, 4), "+3.1400");
+ assert_eq!(to_string(f, -3.14, Minus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, MinusRaw, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, MinusPlus, 8), "-3.14000000");
+ assert_eq!(to_string(f, -3.14, MinusPlusRaw, 8), "-3.14000000");
+
+ // 0.195 rounds down to a (signed) zero at `frac_digits = 0`.
+ assert_eq!(to_string(f, 0.195, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.195, MinusRaw, 0), "0");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 0), "+0");
+ assert_eq!(to_string(f, 0.195, MinusPlusRaw, 0), "+0");
+ assert_eq!(to_string(f, -0.195, Minus, 0), "-0");
+ assert_eq!(to_string(f, -0.195, MinusRaw, 0), "-0");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 0), "-0");
+ assert_eq!(to_string(f, -0.195, MinusPlusRaw, 0), "-0");
+ assert_eq!(to_string(f, 0.195, Minus, 1), "0.2");
+ assert_eq!(to_string(f, 0.195, MinusRaw, 2), "0.20");
+ assert_eq!(to_string(f, 0.195, MinusPlus, 3), "+0.195");
+ assert_eq!(to_string(f, 0.195, MinusPlusRaw, 4), "+0.1950");
+ assert_eq!(to_string(f, -0.195, Minus, 5), "-0.19500");
+ assert_eq!(to_string(f, -0.195, MinusRaw, 6), "-0.195000");
+ assert_eq!(to_string(f, -0.195, MinusPlus, 7), "-0.1950000");
+ assert_eq!(to_string(f, -0.195, MinusPlusRaw, 8), "-0.19500000");
+
+ // Rounding carries can grow the integral part: 999.5 -> "1000".
+ assert_eq!(to_string(f, 999.5, Minus, 0), "1000");
+ assert_eq!(to_string(f, 999.5, Minus, 1), "999.5");
+ assert_eq!(to_string(f, 999.5, Minus, 2), "999.50");
+ assert_eq!(to_string(f, 999.5, Minus, 3), "999.500");
+ assert_eq!(to_string(f, 999.5, Minus, 30), "999.500000000000000000000000000000");
+
+ assert_eq!(to_string(f, 0.5, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.5, Minus, 1), "0.5");
+ assert_eq!(to_string(f, 0.5, Minus, 2), "0.50");
+ assert_eq!(to_string(f, 0.5, Minus, 3), "0.500");
+
+ // Binary-vs-decimal artifacts: the stored value of 0.95 is just under 0.95.
+ assert_eq!(to_string(f, 0.95, Minus, 0), "1");
+ assert_eq!(to_string(f, 0.95, Minus, 1), "0.9"); // because it really is less than 0.95
+ assert_eq!(to_string(f, 0.95, Minus, 2), "0.95");
+ assert_eq!(to_string(f, 0.95, Minus, 3), "0.950");
+ assert_eq!(to_string(f, 0.95, Minus, 10), "0.9500000000");
+ assert_eq!(to_string(f, 0.95, Minus, 30), "0.949999999999999955591079014994");
+
+ assert_eq!(to_string(f, 0.095, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.095, Minus, 1), "0.1");
+ assert_eq!(to_string(f, 0.095, Minus, 2), "0.10");
+ assert_eq!(to_string(f, 0.095, Minus, 3), "0.095");
+ assert_eq!(to_string(f, 0.095, Minus, 4), "0.0950");
+ assert_eq!(to_string(f, 0.095, Minus, 10), "0.0950000000");
+ assert_eq!(to_string(f, 0.095, Minus, 30), "0.095000000000000001110223024625");
+
+ assert_eq!(to_string(f, 0.0095, Minus, 0), "0");
+ assert_eq!(to_string(f, 0.0095, Minus, 1), "0.0");
+ assert_eq!(to_string(f, 0.0095, Minus, 2), "0.01");
+ assert_eq!(to_string(f, 0.0095, Minus, 3), "0.009"); // really is less than 0.0095
+ assert_eq!(to_string(f, 0.0095, Minus, 4), "0.0095");
+ assert_eq!(to_string(f, 0.0095, Minus, 5), "0.00950");
+ assert_eq!(to_string(f, 0.0095, Minus, 10), "0.0095000000");
+ assert_eq!(to_string(f, 0.0095, Minus, 30), "0.009499999999999999764077607267");
+
+ assert_eq!(to_string(f, 7.5e-11, Minus, 0), "0");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 3), "0.000");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 10), "0.0000000001");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 11), "0.00000000007"); // ditto
+ assert_eq!(to_string(f, 7.5e-11, Minus, 12), "0.000000000075");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 13), "0.0000000000750");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 20), "0.00000000007500000000");
+ assert_eq!(to_string(f, 7.5e-11, Minus, 30), "0.000000000074999999999999999501");
+
+ // The integral part is always rendered exactly, however long.
+ assert_eq!(to_string(f, 1.0e25, Minus, 0), "10000000000000000905969664");
+ assert_eq!(to_string(f, 1.0e25, Minus, 1), "10000000000000000905969664.0");
+ assert_eq!(to_string(f, 1.0e25, Minus, 3), "10000000000000000905969664.000");
+
+ assert_eq!(to_string(f, 1.0e-6, Minus, 0), "0");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 3), "0.000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 6), "0.000001");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 9), "0.000001000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 12), "0.000001000000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 22), "0.0000010000000000000000");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 23), "0.00000099999999999999995");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 24), "0.000000999999999999999955");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 25), "0.0000009999999999999999547");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 35), "0.00000099999999999999995474811182589");
+ assert_eq!(to_string(f, 1.0e-6, Minus, 45), "0.000000999999999999999954748111825886258685614");
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 55),
+ "0.0000009999999999999999547481118258862586856139387236908"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 65),
+ "0.00000099999999999999995474811182588625868561393872369080781936646"
+ );
+ assert_eq!(
+ to_string(f, 1.0e-6, Minus, 75),
+ "0.000000999999999999999954748111825886258685613938723690807819366455078125000"
+ );
+
+ assert_eq!(to_string(f, f32::MAX, Minus, 0), "340282346638528859811704183484516925440");
+ assert_eq!(to_string(f, f32::MAX, Minus, 1), "340282346638528859811704183484516925440.0");
+ assert_eq!(to_string(f, f32::MAX, Minus, 2), "340282346638528859811704183484516925440.00");
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ // The smallest f32 subnormal needs 64+ fractional digits before any
+ // nonzero digit appears; fewer digits round to plain zeros.
+ let minf32 = ldexp_f32(1.0, -149);
+ assert_eq!(to_string(f, minf32, Minus, 0), "0");
+ assert_eq!(to_string(f, minf32, Minus, 1), "0.0");
+ assert_eq!(to_string(f, minf32, Minus, 2), "0.00");
+ assert_eq!(to_string(f, minf32, Minus, 4), "0.0000");
+ assert_eq!(to_string(f, minf32, Minus, 8), "0.00000000");
+ assert_eq!(to_string(f, minf32, Minus, 16), "0.0000000000000000");
+ assert_eq!(to_string(f, minf32, Minus, 32), "0.00000000000000000000000000000000");
+ assert_eq!(
+ to_string(f, minf32, Minus, 64),
+ "0.0000000000000000000000000000000000000000000014012984643248170709"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 128),
+ "0.0000000000000000000000000000000000000000000014012984643248170709\
+ 2372958328991613128026194187651577175706828388979108268586060149"
+ );
+ assert_eq!(
+ to_string(f, minf32, Minus, 256),
+ "0.0000000000000000000000000000000000000000000014012984643248170709\
+ 2372958328991613128026194187651577175706828388979108268586060148\
+ 6638188362121582031250000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000"
+ );
+
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 0),
+ "1797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 26204144723168738177180919299881250404026184124858368"
+ );
+ assert_eq!(
+ to_string(f, f64::MAX, Minus, 10),
+ "1797693134862315708145274237317043567980705675258449965989174768\
+ 0315726078002853876058955863276687817154045895351438246423432132\
+ 6889464182768467546703537516986049910576551282076245490090389328\
+ 9440758685084551339423045832369032229481658085593321233482747978\
+ 26204144723168738177180919299881250404026184124858368.0000000000"
+ );
+
+ let minf64 = ldexp_f64(1.0, -1074);
+ assert_eq!(to_string(f, minf64, Minus, 0), "0");
+ assert_eq!(to_string(f, minf64, Minus, 1), "0.0");
+ assert_eq!(to_string(f, minf64, Minus, 10), "0.0000000000");
+ assert_eq!(
+ to_string(f, minf64, Minus, 100),
+ "0.0000000000000000000000000000000000000000000000000000000000000000\
+ 000000000000000000000000000000000000"
+ );
+ assert_eq!(
+ to_string(f, minf64, Minus, 1000),
+ "0.0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0000000000000000000000000000000000000000000000000000000000000000\
+ 0004940656458412465441765687928682213723650598026143247644255856\
+ 8250067550727020875186529983636163599237979656469544571773092665\
+ 6710355939796398774796010781878126300713190311404527845817167848\
+ 9821036887186360569987307230500063874091535649843873124733972731\
+ 6961514003171538539807412623856559117102665855668676818703956031\
+ 0624931945271591492455329305456544401127480129709999541931989409\
+ 0804165633245247571478690147267801593552386115501348035264934720\
+ 1937902681071074917033322268447533357208324319360923828934583680\
+ 6010601150616980975307834227731832924790498252473077637592724787\
+ 4656084778203734469699533647017972677717585125660551199131504891\
+ 1014510378627381672509558373897335989937"
+ );
+
+ // very large output
+ assert_eq!(to_string(f, 0.0, Minus, 80000), format!("0.{:0>80000}", ""));
+ assert_eq!(to_string(f, 1.0e1, Minus, 80000), format!("10.{:0>80000}", ""));
+ assert_eq!(to_string(f, 1.0e0, Minus, 80000), format!("1.{:0>80000}", ""));
+ assert_eq!(
+ to_string(f, 1.0e-1, Minus, 80000),
+ format!("0.1000000000000000055511151231257827021181583404541015625{:0>79945}", "")
+ );
+ assert_eq!(
+ to_string(f, 1.0e-20, Minus, 80000),
+ format!(
+ "0.0000000000000000000099999999999999994515327145420957165172950370\
+ 2787392447107715776066783064379706047475337982177734375{:0>79881}",
+ ""
+ )
+ );
+}
--- /dev/null
+#![cfg(not(target_arch = "wasm32"))]
+
+use std::mem::MaybeUninit;
+use std::str;
+
+use core::num::flt2dec::strategy::grisu::format_exact_opt;
+use core::num::flt2dec::strategy::grisu::format_shortest_opt;
+use core::num::flt2dec::MAX_SIG_DIGITS;
+use core::num::flt2dec::{decode, DecodableFloat, Decoded, FullDecoded};
+
+use rand::distributions::{Distribution, Uniform};
+use rand::rngs::StdRng;
+use rand::SeedableRng;
+
+pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+ FullDecoded::Finite(decoded) => decoded,
+ full_decoded => panic!("expected finite, got {:?} instead", full_decoded),
+ }
+}
+
/// Runs `n` comparisons of `f` (the optimized formatter, which may refuse an
/// input by returning `None`) against `g` (the reference formatter), using
/// digit buffers of length `k`; the `i`-th input is produced by `v(i)`.
///
/// Returns `(npassed, nignored)` and asserts that every input that `f` did
/// not ignore produced identical digits and exponent from both formatters.
fn iterate<F, G, V>(func: &str, k: usize, n: usize, mut f: F, mut g: G, mut v: V) -> (usize, usize)
where
    F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
    G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
    V: FnMut(usize) -> Decoded,
{
    // both scratch buffers below are fixed-size 1024-byte arrays
    assert!(k <= 1024);

    let mut npassed = 0; // f(x) = Some(g(x))
    let mut nignored = 0; // f(x) = None

    for i in 0..n {
        // print a progress line every 2^20 iterations
        if (i & 0xfffff) == 0 {
            println!(
                "in progress, {:x}/{:x} (ignored={} passed={} failed={})",
                i,
                n,
                nignored,
                npassed,
                i - nignored - npassed
            );
        }

        let decoded = v(i);
        let mut buf1 = [MaybeUninit::new(0); 1024];
        if let Some((buf1, e1)) = f(&decoded, &mut buf1[..k]) {
            let mut buf2 = [MaybeUninit::new(0); 1024];
            let (buf2, e2) = g(&decoded, &mut buf2[..k]);
            // both the produced digits and the decimal exponent must agree
            if e1 == e2 && buf1 == buf2 {
                npassed += 1;
            } else {
                println!(
                    "equivalence test failed, {:x}/{:x}: {:?} f(i)={}e{} g(i)={}e{}",
                    i,
                    n,
                    decoded,
                    str::from_utf8(buf1).unwrap(),
                    e1,
                    str::from_utf8(buf2).unwrap(),
                    e2
                );
            }
        } else {
            nignored += 1;
        }
    }
    println!(
        "{}({}): done, ignored={} passed={} failed={}",
        func,
        k,
        nignored,
        npassed,
        n - nignored - npassed
    );
    // a mismatch above only printed a message; this assert is what actually
    // fails the test when any value disagreed
    assert!(
        nignored + npassed == n,
        "{}({}): {} out of {} values returns an incorrect value!",
        func,
        k,
        n - nignored - npassed,
        n
    );
    (npassed, nignored)
}
+
+pub fn f32_random_equivalence_test<F, G>(f: F, g: G, k: usize, n: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ if cfg!(target_os = "emscripten") {
+ return; // using rng pulls in i128 support, which doesn't work
+ }
+ let mut rng = StdRng::from_entropy();
+ let f32_range = Uniform::new(0x0000_0001u32, 0x7f80_0000);
+ iterate("f32_random_equivalence_test", k, n, f, g, |_| {
+ let x = f32::from_bits(f32_range.sample(&mut rng));
+ decode_finite(x)
+ });
+}
+
+pub fn f64_random_equivalence_test<F, G>(f: F, g: G, k: usize, n: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ if cfg!(target_os = "emscripten") {
+ return; // using rng pulls in i128 support, which doesn't work
+ }
+ let mut rng = StdRng::from_entropy();
+ let f64_range = Uniform::new(0x0000_0000_0000_0001u64, 0x7ff0_0000_0000_0000);
+ iterate("f64_random_equivalence_test", k, n, f, g, |_| {
+ let x = f64::from_bits(f64_range.sample(&mut rng));
+ decode_finite(x)
+ });
+}
+
+pub fn f32_exhaustive_equivalence_test<F, G>(f: F, g: G, k: usize)
+where
+ F: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> Option<(&'a [u8], i16)>,
+ G: for<'a> FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
+{
+ // we have only 2^23 * (2^8 - 1) - 1 = 2,139,095,039 positive finite f32 values,
+ // so why not simply testing all of them?
+ //
+ // this is of course very stressful (and thus should be behind an `#[ignore]` attribute),
+ // but with `-C opt-level=3 -C lto` this only takes about an hour or so.
+
+ // iterate from 0x0000_0001 to 0x7f7f_ffff, i.e., all finite ranges
+ let (npassed, nignored) =
+ iterate("f32_exhaustive_equivalence_test", k, 0x7f7f_ffff, f, g, |i: usize| {
+ let x = f32::from_bits(i as u32 + 1);
+ decode_finite(x)
+ });
+ assert_eq!((npassed, nignored), (2121451881, 17643158));
+}
+
/// Randomized agreement check between grisu's and dragon's shortest-mode
/// formatters for both f32 and f64.
#[test]
fn shortest_random_equivalence_test() {
    use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
    // Miri is too slow, so only run a handful of iterations under it
    let n = if cfg!(miri) { 10 } else { 10_000 };

    f32_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, n);
    f64_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, n);
}
+
/// Exhaustive agreement check between grisu and dragon for every finite f32.
#[test]
#[ignore] // it is too expensive
fn shortest_f32_exhaustive_equivalence_test() {
    // it is hard to directly test the optimality of the output, but we can at least test if
    // two different algorithms agree to each other.
    //
    // this reports the progress and the number of f32 values returned `None`.
    // with `--nocapture` (and plenty of time and appropriate rustc flags), this should print:
    // `done, ignored=17643158 passed=2121451881 failed=0`.

    use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
    f32_exhaustive_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS);
}
+
/// A much longer (10^8 samples) randomized f64 agreement check, for use in
/// dedicated stress runs rather than the regular test suite.
#[test]
#[ignore] // it is too expensive
fn shortest_f64_hard_random_equivalence_test() {
    // this again probably has to use appropriate rustc flags.

    use core::num::flt2dec::strategy::dragon::format_shortest as fallback;
    f64_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, 100_000_000);
}
+
/// Randomized agreement check between grisu's and dragon's exact-mode
/// formatters for f32, at every requested digit count from 1 to 20.
#[test]
fn exact_f32_random_equivalence_test() {
    use core::num::flt2dec::strategy::dragon::format_exact as fallback;
    // Miri is too slow
    let n = if cfg!(miri) { 3 } else { 1_000 };

    for k in 1..=20 {
        f32_random_equivalence_test(
            |d, buf| format_exact_opt(d, buf, i16::MIN),
            |d, buf| fallback(d, buf, i16::MIN),
            k,
            n,
        );
    }
}
+
/// Randomized agreement check between grisu's and dragon's exact-mode
/// formatters for f64, at every requested digit count from 1 to 20.
#[test]
fn exact_f64_random_equivalence_test() {
    use core::num::flt2dec::strategy::dragon::format_exact as fallback;
    // Miri is too slow
    let n = if cfg!(miri) { 2 } else { 1_000 };

    for k in 1..=20 {
        f64_random_equivalence_test(
            |d, buf| format_exact_opt(d, buf, i16::MIN),
            |d, buf| fallback(d, buf, i16::MIN),
            k,
            n,
        );
    }
}
--- /dev/null
+use super::super::*;
+use core::num::bignum::Big32x40 as Big;
+use core::num::flt2dec::strategy::dragon::*;
+
/// `mul_pow10(x, i)` must agree with `i` successive multiplications by 10.
#[test]
fn test_mul_pow10() {
    // keep a running product of tens and compare `mul_pow10` against it
    let mut expected = Big::from_small(1);
    for i in 1..340 {
        expected.mul_small(10); // expected is now 10^i
        let mut actual = Big::from_small(1);
        mul_pow10(&mut actual, i);
        assert_eq!(actual, expected);
    }
}
+
/// Runs the shared shortest-mode sanity suites against dragon's formatter.
#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn shortest_sanity_test() {
    more_shortest_sanity_test(format_shortest);
    f32_shortest_sanity_test(format_shortest);
    f64_shortest_sanity_test(format_shortest);
}
+
/// Runs the shared exact-mode sanity suites against dragon's formatter.
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn exact_sanity_test() {
    // the f32 half runs everywhere
    f32_exact_sanity_test(format_exact);

    // The f64 half ends up running what I can only assume is some corner-ish
    // case of the `exp2` library function, defined in whatever C runtime we're
    // using. In VS 2013 this function apparently had a bug as this test fails
    // when linked, but with VS 2015 the bug appears fixed as the test runs just
    // fine.
    //
    // The bug seems to be a difference in return value of `exp2(-1057)`, where
    // in VS 2013 it returns a double with the bit pattern 0x2 and in VS 2015 it
    // returns 0x20000.
    //
    // For now just skip it entirely on MSVC as it's tested elsewhere anyway and
    // we're not super interested in testing each platform's exp2 implementation.
    if !cfg!(target_env = "msvc") {
        f64_exact_sanity_test(format_exact);
    }
}
+
/// `to_shortest_str` conformance for dragon's shortest-mode formatter.
#[test]
fn test_to_shortest_str() {
    to_shortest_str_test(format_shortest);
}

/// `to_shortest_exp_str` conformance for dragon's shortest-mode formatter.
#[test]
fn test_to_shortest_exp_str() {
    to_shortest_exp_str_test(format_shortest);
}

/// `to_exact_exp_str` conformance for dragon's exact-mode formatter.
#[test]
fn test_to_exact_exp_str() {
    to_exact_exp_str_test(format_exact);
}

/// `to_exact_fixed_str` conformance for dragon's exact-mode formatter.
#[test]
fn test_to_exact_fixed_str() {
    to_exact_fixed_str_test(format_exact);
}
--- /dev/null
+use super::super::*;
+use core::num::flt2dec::strategy::grisu::*;
+
/// The cached power-of-ten table must cover its advertised exponent endpoints
/// and return an entry within `[low, high]` for the full f64 exponent range.
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_cached_power() {
    // table endpoints must match the advertised exponents
    assert_eq!(CACHED_POW10.first().unwrap().1, CACHED_POW10_FIRST_E);
    assert_eq!(CACHED_POW10.last().unwrap().1, CACHED_POW10_LAST_E);

    // full range for f64
    for e in -1137..961 {
        let (low, high) = (ALPHA - e - 64, GAMMA - e - 64);
        let (_k, cached) = cached_power(low, high);
        assert!(
            (low..=high).contains(&cached.e),
            "cached_power({}, {}) = {:?} is incorrect",
            low,
            high,
            cached
        );
    }
}
+
/// Checks `max_pow10_no_more_than` on both sides of every power-of-ten
/// boundary through 10^9.
#[test]
fn test_max_pow10_no_more_than() {
    let mut pow = 1;
    for k in 1..10 {
        let next = pow * 10;
        // just below the boundary: previous exponent and power
        assert_eq!(max_pow10_no_more_than(next - 1), (k - 1, pow));
        // exactly at the boundary: current exponent and power
        assert_eq!(max_pow10_no_more_than(next), (k, next));
        pow = next;
    }
}
+
/// Runs the shared shortest-mode sanity suites against grisu's formatter.
#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630
#[test]
fn shortest_sanity_test() {
    more_shortest_sanity_test(format_shortest);
    f32_shortest_sanity_test(format_shortest);
    f64_shortest_sanity_test(format_shortest);
}
+
/// Runs the shared exact-mode sanity suites against grisu's formatter.
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn exact_sanity_test() {
    f32_exact_sanity_test(format_exact);
    // See comments in dragon.rs's exact_sanity_test for why the f64 half is
    // skipped on MSVC.
    if !cfg!(target_env = "msvc") {
        f64_exact_sanity_test(format_exact);
    }
}
+
/// `to_shortest_str` conformance for grisu's shortest-mode formatter.
#[test]
fn test_to_shortest_str() {
    to_shortest_str_test(format_shortest);
}

/// `to_shortest_exp_str` conformance for grisu's shortest-mode formatter.
#[test]
fn test_to_shortest_exp_str() {
    to_shortest_exp_str_test(format_shortest);
}

/// `to_exact_exp_str` conformance for grisu's exact-mode formatter.
#[test]
fn test_to_exact_exp_str() {
    to_exact_exp_str_test(format_exact);
}

/// `to_exact_fixed_str` conformance for grisu's exact-mode formatter.
#[test]
fn test_to_exact_fixed_str() {
    to_exact_fixed_str_test(format_exact);
}
--- /dev/null
// Instantiate the shared signed-integer test suite (see int_macros.rs) for i16.
int_module!(i16, i16);
--- /dev/null
// Instantiate the shared signed-integer test suite (see int_macros.rs) for i32.
int_module!(i32, i32);
+
/// Basic arithmetic, shift, and bitwise smoke tests.
///
/// NOTE(review): despite living in the i32 test module, these historical
/// checks are written against `isize` — presumably intentional, but worth
/// confirming.
#[test]
fn test_arith_operation() {
    let a: isize = 10;
    assert_eq!(a * (a - 1), 90);

    let n: isize = 10;
    assert_eq!(n, 10);
    assert_eq!(n - 10, 0);
    assert_eq!(n / 10, 1);
    assert_eq!(n - 20, -10);
    assert_eq!(n << 10, 10240);
    assert_eq!(n << 16, 655360);
    assert_eq!(n * 16, 160);
    assert_eq!(n * n * n, 1000);
    assert_eq!(n * n * n * n, 10000);
    assert_eq!(n * n / n * n, 100);
    // `*` binds tighter than `<<`: (10 * 9) << 12 == 368640
    assert_eq!(n * (n - 1) << (2 + n as usize), 368640);

    let b: isize = 0x10101010;
    assert_eq!(b + 1 - 1, b);
    assert_eq!(b << 1, b << 1);
    assert_eq!(b >> 1, b >> 1);
    assert_eq!(b & b << 1, 0);
    assert_eq!(b | b << 1, 0x30303030);

    let c: isize = 0x10101010;
    assert_eq!(
        c + c * 2 / 3 * 2 + (c - 7 % 3),
        c + c * 2 / 3 * 2 + (c - 7 % 3)
    );
}
--- /dev/null
// Instantiate the shared signed-integer test suite (see int_macros.rs) for i64.
int_module!(i64, i64);
--- /dev/null
// Instantiate the shared signed-integer test suite (see int_macros.rs) for i8.
int_module!(i8, i8);
--- /dev/null
/// Generates the shared test suite for a signed integer type.
///
/// `$T` is the type under test (e.g. `i32`); `$T_i` is the name of the
/// corresponding legacy module in `core` (e.g. `core::i32`) whose `MIN` and
/// `MAX` constants are glob-imported for use below.
macro_rules! int_module {
    ($T:ident, $T_i:ident) => {
        #[cfg(test)]
        mod tests {
            use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
            use core::$T_i::*;

            use crate::num;

            #[test]
            fn test_overflows() {
                assert!(MAX > 0);
                assert!(MIN <= 0);
                // in two's complement, MIN + MAX == -1
                assert_eq!(MIN + MAX + 1, 0);
            }

            #[test]
            fn test_num() {
                num::test_num(10 as $T, 2 as $T);
            }

            #[test]
            fn test_rem_euclid() {
                assert_eq!((-1 as $T).rem_euclid(MIN), MAX);
            }

            #[test]
            pub fn test_abs() {
                assert_eq!((1 as $T).abs(), 1 as $T);
                assert_eq!((0 as $T).abs(), 0 as $T);
                assert_eq!((-1 as $T).abs(), 1 as $T);
            }

            #[test]
            fn test_signum() {
                assert_eq!((1 as $T).signum(), 1 as $T);
                assert_eq!((0 as $T).signum(), 0 as $T);
                assert_eq!((-0 as $T).signum(), 0 as $T);
                assert_eq!((-1 as $T).signum(), -1 as $T);
            }

            #[test]
            fn test_is_positive() {
                assert!((1 as $T).is_positive());
                assert!(!(0 as $T).is_positive());
                assert!(!(-0 as $T).is_positive());
                assert!(!(-1 as $T).is_positive());
            }

            #[test]
            fn test_is_negative() {
                assert!(!(1 as $T).is_negative());
                assert!(!(0 as $T).is_negative());
                assert!(!(-0 as $T).is_negative());
                assert!((-1 as $T).is_negative());
            }

            // check the `ops` trait methods against the operators
            #[test]
            fn test_bitwise_operators() {
                assert_eq!(0b1110 as $T, (0b1100 as $T).bitor(0b1010 as $T));
                assert_eq!(0b1000 as $T, (0b1100 as $T).bitand(0b1010 as $T));
                assert_eq!(0b0110 as $T, (0b1100 as $T).bitxor(0b1010 as $T));
                assert_eq!(0b1110 as $T, (0b0111 as $T).shl(1));
                assert_eq!(0b0111 as $T, (0b1110 as $T).shr(1));
                assert_eq!(-(0b11 as $T) - (1 as $T), (0b11 as $T).not());
            }

            // small bit patterns shared by the bit-manipulation tests below
            const A: $T = 0b0101100;
            const B: $T = 0b0100001;
            const C: $T = 0b1111001;

            const _0: $T = 0; // all bits clear
            const _1: $T = !0; // all bits set

            #[test]
            fn test_count_ones() {
                assert_eq!(A.count_ones(), 3);
                assert_eq!(B.count_ones(), 2);
                assert_eq!(C.count_ones(), 5);
            }

            #[test]
            fn test_count_zeros() {
                assert_eq!(A.count_zeros(), $T::BITS - 3);
                assert_eq!(B.count_zeros(), $T::BITS - 2);
                assert_eq!(C.count_zeros(), $T::BITS - 5);
            }

            #[test]
            fn test_leading_trailing_ones() {
                let a: $T = 0b0101_1111;
                assert_eq!(a.trailing_ones(), 5);
                assert_eq!((!a).leading_ones(), $T::BITS - 7);

                assert_eq!(a.reverse_bits().leading_ones(), 5);

                assert_eq!(_1.leading_ones(), $T::BITS);
                assert_eq!(_1.trailing_ones(), $T::BITS);

                assert_eq!((_1 << 1).trailing_ones(), 0);
                assert_eq!(MAX.leading_ones(), 0);

                assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
                assert_eq!(MAX.trailing_ones(), $T::BITS - 1);

                assert_eq!(_0.leading_ones(), 0);
                assert_eq!(_0.trailing_ones(), 0);

                let x: $T = 0b0010_1100;
                assert_eq!(x.leading_ones(), 0);
                assert_eq!(x.trailing_ones(), 0);
            }

            #[test]
            fn test_rotate() {
                assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
                assert_eq!(B.rotate_left(3).rotate_left(2).rotate_right(5), B);
                assert_eq!(C.rotate_left(6).rotate_right(2).rotate_right(4), C);

                // Rotating these should make no difference
                //
                // We test using 124 bits because to ensure that overlong bit shifts do
                // not cause undefined behaviour. See #10183.
                assert_eq!(_0.rotate_left(124), _0);
                assert_eq!(_1.rotate_left(124), _1);
                assert_eq!(_0.rotate_right(124), _0);
                assert_eq!(_1.rotate_right(124), _1);

                // Rotating by 0 should have no effect
                assert_eq!(A.rotate_left(0), A);
                assert_eq!(B.rotate_left(0), B);
                assert_eq!(C.rotate_left(0), C);
                // Rotating by a multiple of word size should also have no effect
                assert_eq!(A.rotate_left(64), A);
                assert_eq!(B.rotate_left(64), B);
                assert_eq!(C.rotate_left(64), C);
            }

            #[test]
            fn test_swap_bytes() {
                assert_eq!(A.swap_bytes().swap_bytes(), A);
                assert_eq!(B.swap_bytes().swap_bytes(), B);
                assert_eq!(C.swap_bytes().swap_bytes(), C);

                // Swapping these should make no difference
                assert_eq!(_0.swap_bytes(), _0);
                assert_eq!(_1.swap_bytes(), _1);
            }

            #[test]
            fn test_le() {
                assert_eq!($T::from_le(A.to_le()), A);
                assert_eq!($T::from_le(B.to_le()), B);
                assert_eq!($T::from_le(C.to_le()), C);
                assert_eq!($T::from_le(_0), _0);
                assert_eq!($T::from_le(_1), _1);
                assert_eq!(_0.to_le(), _0);
                assert_eq!(_1.to_le(), _1);
            }

            #[test]
            fn test_be() {
                assert_eq!($T::from_be(A.to_be()), A);
                assert_eq!($T::from_be(B.to_be()), B);
                assert_eq!($T::from_be(C.to_be()), C);
                assert_eq!($T::from_be(_0), _0);
                assert_eq!($T::from_be(_1), _1);
                assert_eq!(_0.to_be(), _0);
                assert_eq!(_1.to_be(), _1);
            }

            #[test]
            fn test_signed_checked_div() {
                assert_eq!((10 as $T).checked_div(2), Some(5));
                assert_eq!((5 as $T).checked_div(0), None);
                // MIN / -1 overflows; previously this was hard-coded to
                // `isize::MIN`, which left $T's own boundary untested
                assert_eq!($T::MIN.checked_div(-1), None);
            }

            #[test]
            fn test_saturating_abs() {
                assert_eq!((0 as $T).saturating_abs(), 0);
                assert_eq!((123 as $T).saturating_abs(), 123);
                assert_eq!((-123 as $T).saturating_abs(), 123);
                assert_eq!((MAX - 2).saturating_abs(), MAX - 2);
                assert_eq!((MAX - 1).saturating_abs(), MAX - 1);
                assert_eq!(MAX.saturating_abs(), MAX);
                assert_eq!((MIN + 2).saturating_abs(), MAX - 1);
                assert_eq!((MIN + 1).saturating_abs(), MAX);
                // |MIN| does not fit, so it saturates at MAX
                assert_eq!(MIN.saturating_abs(), MAX);
            }

            #[test]
            fn test_saturating_neg() {
                assert_eq!((0 as $T).saturating_neg(), 0);
                assert_eq!((123 as $T).saturating_neg(), -123);
                assert_eq!((-123 as $T).saturating_neg(), 123);
                assert_eq!((MAX - 2).saturating_neg(), MIN + 3);
                assert_eq!((MAX - 1).saturating_neg(), MIN + 2);
                assert_eq!(MAX.saturating_neg(), MIN + 1);
                assert_eq!((MIN + 2).saturating_neg(), MAX - 1);
                assert_eq!((MIN + 1).saturating_neg(), MAX);
                // -MIN does not fit, so it saturates at MAX
                assert_eq!(MIN.saturating_neg(), MAX);
            }

            #[test]
            fn test_from_str() {
                fn from_str<T: std::str::FromStr>(t: &str) -> Option<T> {
                    std::str::FromStr::from_str(t).ok()
                }
                assert_eq!(from_str::<$T>("0"), Some(0 as $T));
                assert_eq!(from_str::<$T>("3"), Some(3 as $T));
                assert_eq!(from_str::<$T>("10"), Some(10 as $T));
                assert_eq!(from_str::<i32>("123456789"), Some(123456789 as i32));
                assert_eq!(from_str::<$T>("00100"), Some(100 as $T));

                assert_eq!(from_str::<$T>("-1"), Some(-1 as $T));
                assert_eq!(from_str::<$T>("-3"), Some(-3 as $T));
                assert_eq!(from_str::<$T>("-10"), Some(-10 as $T));
                assert_eq!(from_str::<i32>("-123456789"), Some(-123456789 as i32));
                assert_eq!(from_str::<$T>("-00100"), Some(-100 as $T));

                assert_eq!(from_str::<$T>(""), None);
                assert_eq!(from_str::<$T>(" "), None);
                assert_eq!(from_str::<$T>("x"), None);
            }

            #[test]
            fn test_from_str_radix() {
                assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
                assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
                assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
                assert_eq!(i32::from_str_radix("123", 16), Ok(291 as i32));
                assert_eq!(i32::from_str_radix("ffff", 16), Ok(65535 as i32));
                assert_eq!(i32::from_str_radix("FFFF", 16), Ok(65535 as i32));
                assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));
                assert_eq!($T::from_str_radix("Z", 36), Ok(35 as $T));

                assert_eq!($T::from_str_radix("-123", 10), Ok(-123 as $T));
                assert_eq!($T::from_str_radix("-1001", 2), Ok(-9 as $T));
                assert_eq!($T::from_str_radix("-123", 8), Ok(-83 as $T));
                assert_eq!(i32::from_str_radix("-123", 16), Ok(-291 as i32));
                assert_eq!(i32::from_str_radix("-ffff", 16), Ok(-65535 as i32));
                assert_eq!(i32::from_str_radix("-FFFF", 16), Ok(-65535 as i32));
                assert_eq!($T::from_str_radix("-z", 36), Ok(-35 as $T));
                assert_eq!($T::from_str_radix("-Z", 36), Ok(-35 as $T));

                // digits out of range for the radix must be rejected
                assert_eq!($T::from_str_radix("Z", 35).ok(), None::<$T>);
                assert_eq!($T::from_str_radix("-9", 2).ok(), None::<$T>);
            }

            #[test]
            fn test_pow() {
                let mut r = 2 as $T;
                assert_eq!(r.pow(2), 4 as $T);
                assert_eq!(r.pow(0), 1 as $T);
                assert_eq!(r.wrapping_pow(2), 4 as $T);
                assert_eq!(r.wrapping_pow(0), 1 as $T);
                assert_eq!(r.checked_pow(2), Some(4 as $T));
                assert_eq!(r.checked_pow(0), Some(1 as $T));
                assert_eq!(r.overflowing_pow(2), (4 as $T, false));
                assert_eq!(r.overflowing_pow(0), (1 as $T, false));
                assert_eq!(r.saturating_pow(2), 4 as $T);
                assert_eq!(r.saturating_pow(0), 1 as $T);

                r = MAX;
                // use `^` to represent .pow() with no overflow.
                // if itest::MAX == 2^j-1, then itest is a `j` bit int,
                // so that `itest::MAX*itest::MAX == 2^(2*j)-2^(j+1)+1`,
                // thus the overflowing result is exactly 1.
                assert_eq!(r.wrapping_pow(2), 1 as $T);
                assert_eq!(r.checked_pow(2), None);
                assert_eq!(r.overflowing_pow(2), (1 as $T, true));
                assert_eq!(r.saturating_pow(2), MAX);
                // test for a negative base.
                r = -2 as $T;
                assert_eq!(r.pow(2), 4 as $T);
                assert_eq!(r.pow(3), -8 as $T);
                assert_eq!(r.pow(0), 1 as $T);
                assert_eq!(r.wrapping_pow(2), 4 as $T);
                assert_eq!(r.wrapping_pow(3), -8 as $T);
                assert_eq!(r.wrapping_pow(0), 1 as $T);
                assert_eq!(r.checked_pow(2), Some(4 as $T));
                assert_eq!(r.checked_pow(3), Some(-8 as $T));
                assert_eq!(r.checked_pow(0), Some(1 as $T));
                assert_eq!(r.overflowing_pow(2), (4 as $T, false));
                assert_eq!(r.overflowing_pow(3), (-8 as $T, false));
                assert_eq!(r.overflowing_pow(0), (1 as $T, false));
                assert_eq!(r.saturating_pow(2), 4 as $T);
                assert_eq!(r.saturating_pow(3), -8 as $T);
                assert_eq!(r.saturating_pow(0), 1 as $T);
            }
        }
    };
}
--- /dev/null
+use core::cmp::PartialEq;
+use core::convert::{TryFrom, TryInto};
+use core::fmt::Debug;
+use core::marker::Copy;
+use core::num::{IntErrorKind, ParseIntError, TryFromIntError};
+use core::ops::{Add, Div, Mul, Rem, Sub};
+use core::option::Option;
+use core::option::Option::None;
+use core::str::FromStr;
+
+#[macro_use]
+mod int_macros;
+
+mod i16;
+mod i32;
+mod i64;
+mod i8;
+
+#[macro_use]
+mod uint_macros;
+
+mod u16;
+mod u32;
+mod u64;
+mod u8;
+
+mod bignum;
+mod dec2flt;
+mod flt2dec;
+
+mod nan;
+
/// Adds the attribute to all items in the block.
///
/// Used below to apply a single `#[cfg(target_pointer_width = ...)]` gate to
/// a whole group of macro invocations at once.
macro_rules! cfg_block {
    ($(#[$attr:meta]{$($it:item)*})*) => {$($(
        #[$attr]
        $it
    )*)*}
}
+
/// Groups items that assume the pointer width is either 16/32/64, and has to be altered if
/// support for larger/smaller pointer widths are added in the future.
///
/// Emits a `compile_error!` (alongside the wrapped items) on any other
/// pointer width, so the assumption fails loudly rather than silently.
macro_rules! assume_usize_width {
    {$($it:item)*} => {#[cfg(not(any(
        target_pointer_width = "16", target_pointer_width = "32", target_pointer_width = "64")))]
        compile_error!("The current tests of try_from on usize/isize assume that \
                        the pointer width is either 16, 32, or 64");
        $($it)*
    }
}
+
/// Helper function for testing numeric operations: checks that each `ops`
/// trait method agrees with the corresponding built-in operator for the
/// pair `(ten, two)`.
pub fn test_num<T>(ten: T, two: T)
where
    T: PartialEq
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + Rem<Output = T>
        + Debug
        + Copy,
{
    // fully-qualified trait calls on the left, operator syntax on the right
    assert_eq!(Add::add(ten, two), ten + two);
    assert_eq!(Sub::sub(ten, two), ten - two);
    assert_eq!(Mul::mul(ten, two), ten * two);
    assert_eq!(Div::div(ten, two), ten / two);
    assert_eq!(Rem::rem(ten, two), ten % two);
}
+
/// Helper function asserting that parsing `num_str` as `T` yields `expected`,
/// where parse errors are compared by their `IntErrorKind`.
fn test_parse<T>(num_str: &str, expected: Result<T, IntErrorKind>)
where
    T: FromStr<Err = ParseIntError>,
    Result<T, IntErrorKind>: PartialEq + Debug,
{
    let actual = num_str.parse::<T>().map_err(|err| err.kind().clone());
    assert_eq!(actual, expected);
}
+
/// Regression test for issue 7588: out-of-range digit strings must parse to
/// `None` rather than wrap around.
#[test]
fn from_str_issue7588() {
    assert_eq!(u8::from_str_radix("1000", 10).ok(), None::<u8>);
    assert_eq!(i16::from_str_radix("80000", 10).ok(), None::<i16>);
}
+
/// For every signed width: the extreme values parse, and one past each
/// extreme reports the matching overflow kind.
#[test]
fn test_int_from_str_overflow() {
    test_parse::<i8>("127", Ok(i8::MAX));
    test_parse::<i8>("128", Err(IntErrorKind::PosOverflow));
    test_parse::<i8>("-128", Ok(i8::MIN));
    test_parse::<i8>("-129", Err(IntErrorKind::NegOverflow));

    test_parse::<i16>("32767", Ok(i16::MAX));
    test_parse::<i16>("32768", Err(IntErrorKind::PosOverflow));
    test_parse::<i16>("-32768", Ok(i16::MIN));
    test_parse::<i16>("-32769", Err(IntErrorKind::NegOverflow));

    test_parse::<i32>("2147483647", Ok(i32::MAX));
    test_parse::<i32>("2147483648", Err(IntErrorKind::PosOverflow));
    test_parse::<i32>("-2147483648", Ok(i32::MIN));
    test_parse::<i32>("-2147483649", Err(IntErrorKind::NegOverflow));

    test_parse::<i64>("9223372036854775807", Ok(i64::MAX));
    test_parse::<i64>("9223372036854775808", Err(IntErrorKind::PosOverflow));
    test_parse::<i64>("-9223372036854775808", Ok(i64::MIN));
    test_parse::<i64>("-9223372036854775809", Err(IntErrorKind::NegOverflow));
}
+
/// A leading `+` sign is accepted for both unsigned and signed types.
#[test]
fn test_leading_plus() {
    test_parse::<u8>("+127", Ok(127));
    test_parse::<i64>("+9223372036854775807", Ok(i64::MAX));
}
+
/// Malformed numeric strings must all report `InvalidDigit`.
#[test]
fn test_invalid() {
    // signs with no digits
    test_parse::<i8>("-", Err(IntErrorKind::InvalidDigit));
    test_parse::<i8>("+", Err(IntErrorKind::InvalidDigit));
    test_parse::<i8>("--", Err(IntErrorKind::InvalidDigit));
    // doubled signs
    test_parse::<i8>("--129", Err(IntErrorKind::InvalidDigit));
    test_parse::<i8>("++129", Err(IntErrorKind::InvalidDigit));
    // non-digit and trailing-garbage input
    test_parse::<u8>("Съешь", Err(IntErrorKind::InvalidDigit));
    test_parse::<u8>("123Hello", Err(IntErrorKind::InvalidDigit));
    // a minus sign on an unsigned type
    test_parse::<u8>("-1", Err(IntErrorKind::InvalidDigit));
}
+
/// The empty string is reported as `Empty`, not `InvalidDigit`.
#[test]
fn test_empty() {
    test_parse::<u8>("", Err(IntErrorKind::Empty));
}
+
/// `?` on an infallible `try_into` must coerce `Infallible` into
/// `TryFromIntError` via its `From` impl.
#[test]
fn test_infallible_try_from_int_error() {
    // i8 -> i32 cannot fail, so the `?` only exercises the error conversion
    fn widen(x: i8) -> Result<i32, TryFromIntError> {
        Ok(x.try_into()?)
    }

    assert!(widen(0).is_ok());
}
+
/// Generates a `#[test]` named `$fn_name` checking the `From` conversion
/// from `$Small` (or `bool`) into `$Large`/`$target`.
macro_rules! test_impl_from {
    // bool -> integer: false maps to 0 and true maps to 1
    ($fn_name:ident, bool, $target: ty) => {
        #[test]
        fn $fn_name() {
            let one: $target = 1;
            let zero: $target = 0;
            assert_eq!(one, <$target>::from(true));
            assert_eq!(zero, <$target>::from(false));
        }
    };
    // widening integer conversion: MAX and MIN must round-trip via `as`
    ($fn_name: ident, $Small: ty, $Large: ty) => {
        #[test]
        fn $fn_name() {
            let small_max = <$Small>::MAX;
            let small_min = <$Small>::MIN;
            let large_max: $Large = small_max.into();
            let large_min: $Large = small_min.into();
            assert_eq!(large_max as $Small, small_max);
            assert_eq!(large_min as $Small, small_min);
        }
    };
}
+
// Unsigned -> Unsigned (lossless widening `From` impls)
test_impl_from! { test_u8u16, u8, u16 }
test_impl_from! { test_u8u32, u8, u32 }
test_impl_from! { test_u8u64, u8, u64 }
test_impl_from! { test_u8usize, u8, usize }
test_impl_from! { test_u16u32, u16, u32 }
test_impl_from! { test_u16u64, u16, u64 }
test_impl_from! { test_u32u64, u32, u64 }

// Signed -> Signed (lossless widening `From` impls)
test_impl_from! { test_i8i16, i8, i16 }
test_impl_from! { test_i8i32, i8, i32 }
test_impl_from! { test_i8i64, i8, i64 }
test_impl_from! { test_i8isize, i8, isize }
test_impl_from! { test_i16i32, i16, i32 }
test_impl_from! { test_i16i64, i16, i64 }
test_impl_from! { test_i32i64, i32, i64 }

// Unsigned -> Signed (target strictly wider, so always in range)
test_impl_from! { test_u8i16, u8, i16 }
test_impl_from! { test_u8i32, u8, i32 }
test_impl_from! { test_u8i64, u8, i64 }
test_impl_from! { test_u16i32, u16, i32 }
test_impl_from! { test_u16i64, u16, i64 }
test_impl_from! { test_u32i64, u32, i64 }

// Bool -> Integer (false -> 0, true -> 1)
test_impl_from! { test_boolu8, bool, u8 }
test_impl_from! { test_boolu16, bool, u16 }
test_impl_from! { test_boolu32, bool, u32 }
test_impl_from! { test_boolu64, bool, u64 }
test_impl_from! { test_boolu128, bool, u128 }
test_impl_from! { test_booli8, bool, i8 }
test_impl_from! { test_booli16, bool, i16 }
test_impl_from! { test_booli32, bool, i32 }
test_impl_from! { test_booli64, bool, i64 }
test_impl_from! { test_booli128, bool, i128 }

// Signed -> Float (float mantissa wide enough for exact conversion)
test_impl_from! { test_i8f32, i8, f32 }
test_impl_from! { test_i8f64, i8, f64 }
test_impl_from! { test_i16f32, i16, f32 }
test_impl_from! { test_i16f64, i16, f64 }
test_impl_from! { test_i32f64, i32, f64 }

// Unsigned -> Float (float mantissa wide enough for exact conversion)
test_impl_from! { test_u8f32, u8, f32 }
test_impl_from! { test_u8f64, u8, f64 }
test_impl_from! { test_u16f32, u16, f32 }
test_impl_from! { test_u16f64, u16, f64 }
test_impl_from! { test_u32f64, u32, f64 }
+
+// Float -> Float
+#[test]
+fn test_f32f64() {
+ let max: f64 = f32::MAX.into();
+ assert_eq!(max as f32, f32::MAX);
+ assert!(max.is_normal());
+
+ let min: f64 = f32::MIN.into();
+ assert_eq!(min as f32, f32::MIN);
+ assert!(min.is_normal());
+
+ let min_positive: f64 = f32::MIN_POSITIVE.into();
+ assert_eq!(min_positive as f32, f32::MIN_POSITIVE);
+ assert!(min_positive.is_normal());
+
+ let epsilon: f64 = f32::EPSILON.into();
+ assert_eq!(epsilon as f32, f32::EPSILON);
+ assert!(epsilon.is_normal());
+
+ let zero: f64 = (0.0f32).into();
+ assert_eq!(zero as f32, 0.0f32);
+ assert!(zero.is_sign_positive());
+
+ let neg_zero: f64 = (-0.0f32).into();
+ assert_eq!(neg_zero as f32, -0.0f32);
+ assert!(neg_zero.is_sign_negative());
+
+ let infinity: f64 = f32::INFINITY.into();
+ assert_eq!(infinity as f32, f32::INFINITY);
+ assert!(infinity.is_infinite());
+ assert!(infinity.is_sign_positive());
+
+ let neg_infinity: f64 = f32::NEG_INFINITY.into();
+ assert_eq!(neg_infinity as f32, f32::NEG_INFINITY);
+ assert!(neg_infinity.is_infinite());
+ assert!(neg_infinity.is_sign_negative());
+
+ let nan: f64 = f32::NAN.into();
+ assert!(nan.is_nan());
+}
+
/// Conversions where the full width of $source can be represented as $target
///
/// Generates a `#[test]` named `$fn_name` asserting that `TryFrom` succeeds
/// for MAX, MIN, and zero of the source type and agrees with an `as` cast.
macro_rules! test_impl_try_from_always_ok {
    ($fn_name:ident, $source:ty, $target: ty) => {
        #[test]
        fn $fn_name() {
            let max = <$source>::MAX;
            let min = <$source>::MIN;
            let zero: $source = 0;
            assert_eq!(<$target as TryFrom<$source>>::try_from(max).unwrap(), max as $target);
            assert_eq!(<$target as TryFrom<$source>>::try_from(min).unwrap(), min as $target);
            assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), zero as $target);
        }
    };
}
+
// Every `TryFrom` below cannot fail: MAX, MIN, and zero of the source type
// all fit in the target type.
test_impl_try_from_always_ok! { test_try_u8u8, u8, u8 }
test_impl_try_from_always_ok! { test_try_u8u16, u8, u16 }
test_impl_try_from_always_ok! { test_try_u8u32, u8, u32 }
test_impl_try_from_always_ok! { test_try_u8u64, u8, u64 }
test_impl_try_from_always_ok! { test_try_u8u128, u8, u128 }
test_impl_try_from_always_ok! { test_try_u8i16, u8, i16 }
test_impl_try_from_always_ok! { test_try_u8i32, u8, i32 }
test_impl_try_from_always_ok! { test_try_u8i64, u8, i64 }
test_impl_try_from_always_ok! { test_try_u8i128, u8, i128 }

test_impl_try_from_always_ok! { test_try_u16u16, u16, u16 }
test_impl_try_from_always_ok! { test_try_u16u32, u16, u32 }
test_impl_try_from_always_ok! { test_try_u16u64, u16, u64 }
test_impl_try_from_always_ok! { test_try_u16u128, u16, u128 }
test_impl_try_from_always_ok! { test_try_u16i32, u16, i32 }
test_impl_try_from_always_ok! { test_try_u16i64, u16, i64 }
test_impl_try_from_always_ok! { test_try_u16i128, u16, i128 }

test_impl_try_from_always_ok! { test_try_u32u32, u32, u32 }
test_impl_try_from_always_ok! { test_try_u32u64, u32, u64 }
test_impl_try_from_always_ok! { test_try_u32u128, u32, u128 }
test_impl_try_from_always_ok! { test_try_u32i64, u32, i64 }
test_impl_try_from_always_ok! { test_try_u32i128, u32, i128 }

test_impl_try_from_always_ok! { test_try_u64u64, u64, u64 }
test_impl_try_from_always_ok! { test_try_u64u128, u64, u128 }
test_impl_try_from_always_ok! { test_try_u64i128, u64, i128 }

test_impl_try_from_always_ok! { test_try_u128u128, u128, u128 }

test_impl_try_from_always_ok! { test_try_i8i8, i8, i8 }
test_impl_try_from_always_ok! { test_try_i8i16, i8, i16 }
test_impl_try_from_always_ok! { test_try_i8i32, i8, i32 }
test_impl_try_from_always_ok! { test_try_i8i64, i8, i64 }
test_impl_try_from_always_ok! { test_try_i8i128, i8, i128 }

test_impl_try_from_always_ok! { test_try_i16i16, i16, i16 }
test_impl_try_from_always_ok! { test_try_i16i32, i16, i32 }
test_impl_try_from_always_ok! { test_try_i16i64, i16, i64 }
test_impl_try_from_always_ok! { test_try_i16i128, i16, i128 }

test_impl_try_from_always_ok! { test_try_i32i32, i32, i32 }
test_impl_try_from_always_ok! { test_try_i32i64, i32, i64 }
test_impl_try_from_always_ok! { test_try_i32i128, i32, i128 }

test_impl_try_from_always_ok! { test_try_i64i64, i64, i64 }
test_impl_try_from_always_ok! { test_try_i64i128, i64, i128 }

test_impl_try_from_always_ok! { test_try_i128i128, i128, i128 }

test_impl_try_from_always_ok! { test_try_usizeusize, usize, usize }
test_impl_try_from_always_ok! { test_try_isizeisize, isize, isize }
+
+assume_usize_width! {
+ test_impl_try_from_always_ok! { test_try_u8usize, u8, usize }
+ test_impl_try_from_always_ok! { test_try_u8isize, u8, isize }
+ test_impl_try_from_always_ok! { test_try_i8isize, i8, isize }
+
+ test_impl_try_from_always_ok! { test_try_u16usize, u16, usize }
+ test_impl_try_from_always_ok! { test_try_i16isize, i16, isize }
+
+ test_impl_try_from_always_ok! { test_try_usizeu64, usize, u64 }
+ test_impl_try_from_always_ok! { test_try_usizeu128, usize, u128 }
+ test_impl_try_from_always_ok! { test_try_usizei128, usize, i128 }
+
+ test_impl_try_from_always_ok! { test_try_isizei64, isize, i64 }
+ test_impl_try_from_always_ok! { test_try_isizei128, isize, i128 }
+
+ cfg_block!(
+ #[cfg(target_pointer_width = "16")] {
+ test_impl_try_from_always_ok! { test_try_usizeu16, usize, u16 }
+ test_impl_try_from_always_ok! { test_try_isizei16, isize, i16 }
+ test_impl_try_from_always_ok! { test_try_usizeu32, usize, u32 }
+ test_impl_try_from_always_ok! { test_try_usizei32, usize, i32 }
+ test_impl_try_from_always_ok! { test_try_isizei32, isize, i32 }
+ test_impl_try_from_always_ok! { test_try_usizei64, usize, i64 }
+ }
+
+ #[cfg(target_pointer_width = "32")] {
+ test_impl_try_from_always_ok! { test_try_u16isize, u16, isize }
+ test_impl_try_from_always_ok! { test_try_usizeu32, usize, u32 }
+ test_impl_try_from_always_ok! { test_try_isizei32, isize, i32 }
+ test_impl_try_from_always_ok! { test_try_u32usize, u32, usize }
+ test_impl_try_from_always_ok! { test_try_i32isize, i32, isize }
+ test_impl_try_from_always_ok! { test_try_usizei64, usize, i64 }
+ }
+
+ #[cfg(target_pointer_width = "64")] {
+ test_impl_try_from_always_ok! { test_try_u16isize, u16, isize }
+ test_impl_try_from_always_ok! { test_try_u32usize, u32, usize }
+ test_impl_try_from_always_ok! { test_try_u32isize, u32, isize }
+ test_impl_try_from_always_ok! { test_try_i32isize, i32, isize }
+ test_impl_try_from_always_ok! { test_try_u64usize, u64, usize }
+ test_impl_try_from_always_ok! { test_try_i64isize, i64, isize }
+ }
+ );
+}
+
/// Generates a test for signed → unsigned conversions in which the source's
/// MAX fits in the target: non-negative values convert, negatives are
/// rejected.
macro_rules! test_impl_try_from_signed_to_unsigned_upper_ok {
    ($name:ident, $src:ty, $dst:ty) => {
        #[test]
        fn $name() {
            let convert = |v: $src| <$dst as TryFrom<$src>>::try_from(v);
            // Non-negative values are representable in the unsigned target.
            assert_eq!(convert(<$src>::MAX).unwrap(), <$src>::MAX as $dst);
            assert_eq!(convert(0).unwrap(), 0 as $dst);
            // Any negative value must be rejected.
            assert!(convert(<$src>::MIN).is_err());
            assert!(convert(-1).is_err());
        }
    };
}
+
// Signed → unsigned pairs where the signed MAX fits in the unsigned target,
// so only negative inputs can fail.
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u8, i8, u8 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u16, i8, u16 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u32, i8, u32 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u64, i8, u64 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u128, i8, u128 }

test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u16, i16, u16 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u32, i16, u32 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u64, i16, u64 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u128, i16, u128 }

test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u32, i32, u32 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u64, i32, u64 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u128, i32, u128 }

test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64u64, i64, u64 }
test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64u128, i64, u128 }

test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i128u128, i128, u128 }

// Pointer-width-dependent instantiations of the same property.
assume_usize_width! {
    test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8usize, i8, usize }
    test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16usize, i16, usize }

    test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu64, isize, u64 }
    test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu128, isize, u128 }
    test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeusize, isize, usize }

    cfg_block!(
        #[cfg(target_pointer_width = "16")] {
            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu16, isize, u16 }
            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu32, isize, u32 }
        }

        #[cfg(target_pointer_width = "32")] {
            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_isizeu32, isize, u32 }

            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32usize, i32, usize }
        }

        #[cfg(target_pointer_width = "64")] {
            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32usize, i32, usize }
            test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64usize, i64, usize }
        }
    );
}
+
/// Generates a test for unsigned → signed conversions in which the source's
/// MAX does not fit in the signed target, while the source's MIN (always 0)
/// converts cleanly.
macro_rules! test_impl_try_from_unsigned_to_signed_upper_err {
    ($name:ident, $src:ty, $dst:ty) => {
        #[test]
        fn $name() {
            let convert = |v: $src| <$dst as TryFrom<$src>>::try_from(v);
            // The source maximum exceeds the signed target's range.
            assert!(convert(<$src>::MAX).is_err());
            // MIN (which is 0 for unsigned sources) and 0 are representable.
            assert_eq!(convert(<$src>::MIN).unwrap(), <$src>::MIN as $dst);
            assert_eq!(convert(0).unwrap(), 0 as $dst);
        }
    };
}
+
// Unsigned → signed pairs where the unsigned MAX overflows the signed
// target, but 0 (the unsigned MIN) always converts.
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u8i8, u8, i8 }

test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i8, u16, i8 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i16, u16, i16 }

test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i8, u32, i8 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i16, u32, i16 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i32, u32, i32 }

test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i8, u64, i8 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i16, u64, i16 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i32, u64, i32 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i64, u64, i64 }

test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i8, u128, i8 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i16, u128, i16 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i32, u128, i32 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i64, u128, i64 }
test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128i128, u128, i128 }

// Pointer-width-dependent instantiations of the same property.
assume_usize_width! {
    test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64isize, u64, isize }
    test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u128isize, u128, isize }

    test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei8, usize, i8 }
    test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei16, usize, i16 }
    test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizeisize, usize, isize }

    cfg_block!(
        #[cfg(target_pointer_width = "16")] {
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16isize, u16, isize }
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32isize, u32, isize }
        }

        #[cfg(target_pointer_width = "32")] {
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32isize, u32, isize }
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei32, usize, i32 }
        }

        #[cfg(target_pointer_width = "64")] {
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei32, usize, i32 }
            test_impl_try_from_unsigned_to_signed_upper_err! { test_try_usizei64, usize, i64 }
        }
    );
}
+
/// Generates a test for conversions between types of the same signedness
/// where the source range strictly contains the target range: the source's
/// extremes overflow, while 0 and the target's own extremes (lifted into
/// the source type) convert exactly.
macro_rules! test_impl_try_from_same_sign_err {
    ($name:ident, $src:ty, $dst:ty) => {
        #[test]
        fn $name() {
            let convert = |v: $src| <$dst as TryFrom<$src>>::try_from(v);
            assert!(convert(<$src>::MAX).is_err());
            // For unsigned sources MIN is 0, which *is* representable, so
            // only probe MIN when it is a genuinely negative value.
            if <$src>::MIN != 0 {
                assert!(convert(<$src>::MIN).is_err());
            }
            assert_eq!(convert(0).unwrap(), 0 as $dst);
            // The target's extremes round-trip through the source type.
            assert_eq!(convert(<$dst>::MAX as $src).unwrap(), <$dst>::MAX);
            assert_eq!(convert(<$dst>::MIN as $src).unwrap(), <$dst>::MIN);
        }
    };
}
+
// Narrowing conversions between same-signedness types: the source extremes
// overflow, 0 and the target's extremes convert.
test_impl_try_from_same_sign_err! { test_try_u16u8, u16, u8 }

test_impl_try_from_same_sign_err! { test_try_u32u8, u32, u8 }
test_impl_try_from_same_sign_err! { test_try_u32u16, u32, u16 }

test_impl_try_from_same_sign_err! { test_try_u64u8, u64, u8 }
test_impl_try_from_same_sign_err! { test_try_u64u16, u64, u16 }
test_impl_try_from_same_sign_err! { test_try_u64u32, u64, u32 }

test_impl_try_from_same_sign_err! { test_try_u128u8, u128, u8 }
test_impl_try_from_same_sign_err! { test_try_u128u16, u128, u16 }
test_impl_try_from_same_sign_err! { test_try_u128u32, u128, u32 }
test_impl_try_from_same_sign_err! { test_try_u128u64, u128, u64 }

test_impl_try_from_same_sign_err! { test_try_i16i8, i16, i8 }
test_impl_try_from_same_sign_err! { test_try_isizei8, isize, i8 }

test_impl_try_from_same_sign_err! { test_try_i32i8, i32, i8 }
test_impl_try_from_same_sign_err! { test_try_i32i16, i32, i16 }

test_impl_try_from_same_sign_err! { test_try_i64i8, i64, i8 }
test_impl_try_from_same_sign_err! { test_try_i64i16, i64, i16 }
test_impl_try_from_same_sign_err! { test_try_i64i32, i64, i32 }

test_impl_try_from_same_sign_err! { test_try_i128i8, i128, i8 }
test_impl_try_from_same_sign_err! { test_try_i128i16, i128, i16 }
test_impl_try_from_same_sign_err! { test_try_i128i32, i128, i32 }
test_impl_try_from_same_sign_err! { test_try_i128i64, i128, i64 }

// Pointer-width-dependent instantiations of the same property.
assume_usize_width! {
    test_impl_try_from_same_sign_err! { test_try_usizeu8, usize, u8 }
    test_impl_try_from_same_sign_err! { test_try_u128usize, u128, usize }
    test_impl_try_from_same_sign_err! { test_try_i128isize, i128, isize }

    cfg_block!(
        #[cfg(target_pointer_width = "16")] {
            test_impl_try_from_same_sign_err! { test_try_u32usize, u32, usize }
            test_impl_try_from_same_sign_err! { test_try_u64usize, u64, usize }

            test_impl_try_from_same_sign_err! { test_try_i32isize, i32, isize }
            test_impl_try_from_same_sign_err! { test_try_i64isize, i64, isize }
        }

        #[cfg(target_pointer_width = "32")] {
            test_impl_try_from_same_sign_err! { test_try_u64usize, u64, usize }
            test_impl_try_from_same_sign_err! { test_try_usizeu16, usize, u16 }

            test_impl_try_from_same_sign_err! { test_try_i64isize, i64, isize }
            test_impl_try_from_same_sign_err! { test_try_isizei16, isize, i16 }
        }

        #[cfg(target_pointer_width = "64")] {
            test_impl_try_from_same_sign_err! { test_try_usizeu16, usize, u16 }
            test_impl_try_from_same_sign_err! { test_try_usizeu32, usize, u32 }

            test_impl_try_from_same_sign_err! { test_try_isizei16, isize, i16 }
            test_impl_try_from_same_sign_err! { test_try_isizei32, isize, i32 }
        }
    );
}
+
/// Generates a test for signed → unsigned conversions where the source
/// range extends beyond the target on both ends: source MAX and MIN both
/// overflow, while 0 and the target's extremes (lifted into the source
/// type) convert exactly.
macro_rules! test_impl_try_from_signed_to_unsigned_err {
    ($name:ident, $src:ty, $dst:ty) => {
        #[test]
        fn $name() {
            let convert = |v: $src| <$dst as TryFrom<$src>>::try_from(v);
            // Both source extremes fall outside the target's range.
            assert!(convert(<$src>::MAX).is_err());
            assert!(convert(<$src>::MIN).is_err());
            assert_eq!(convert(0).unwrap(), 0 as $dst);
            // The target's extremes round-trip through the source type.
            assert_eq!(convert(<$dst>::MAX as $src).unwrap(), <$dst>::MAX);
            assert_eq!(convert(<$dst>::MIN as $src).unwrap(), <$dst>::MIN);
        }
    };
}
+
// Signed → narrower-unsigned pairs: both source extremes overflow the
// target; 0 and the target's extremes convert.
test_impl_try_from_signed_to_unsigned_err! { test_try_i16u8, i16, u8 }

test_impl_try_from_signed_to_unsigned_err! { test_try_i32u8, i32, u8 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i32u16, i32, u16 }

test_impl_try_from_signed_to_unsigned_err! { test_try_i64u8, i64, u8 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i64u16, i64, u16 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i64u32, i64, u32 }

test_impl_try_from_signed_to_unsigned_err! { test_try_i128u8, i128, u8 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i128u16, i128, u16 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i128u32, i128, u32 }
test_impl_try_from_signed_to_unsigned_err! { test_try_i128u64, i128, u64 }

// Pointer-width-dependent instantiations of the same property.
assume_usize_width! {
    test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu8, isize, u8 }
    test_impl_try_from_signed_to_unsigned_err! { test_try_i128usize, i128, usize }

    cfg_block! {
        #[cfg(target_pointer_width = "16")] {
            test_impl_try_from_signed_to_unsigned_err! { test_try_i32usize, i32, usize }
            test_impl_try_from_signed_to_unsigned_err! { test_try_i64usize, i64, usize }
        }
        #[cfg(target_pointer_width = "32")] {
            test_impl_try_from_signed_to_unsigned_err! { test_try_i64usize, i64, usize }

            test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu16, isize, u16 }
        }
        #[cfg(target_pointer_width = "64")] {
            test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu16, isize, u16 }
            test_impl_try_from_signed_to_unsigned_err! { test_try_isizeu32, isize, u32 }
        }
    }
}
+
/// Generates a module of shared tests for a primitive float type.
///
/// `$modname` names the generated module (and thus the test-path prefix),
/// `$fty` is the float type, and `$inf`/`$neginf`/`$nan` are expressions
/// for the type's special values.
macro_rules! test_float {
    ($modname: ident, $fty: ty, $inf: expr, $neginf: expr, $nan: expr) => {
        mod $modname {
            /// `min` picks the smaller operand; per the asserts below a NaN
            /// operand is ignored (NaN comes back only when both are NaN),
            /// and the sign of zero is preserved through the comparison.
            #[test]
            fn min() {
                assert_eq!((0.0 as $fty).min(0.0), 0.0);
                assert!((0.0 as $fty).min(0.0).is_sign_positive());
                assert_eq!((-0.0 as $fty).min(-0.0), -0.0);
                assert!((-0.0 as $fty).min(-0.0).is_sign_negative());
                assert_eq!((9.0 as $fty).min(9.0), 9.0);
                assert_eq!((-9.0 as $fty).min(0.0), -9.0);
                assert_eq!((0.0 as $fty).min(9.0), 0.0);
                assert!((0.0 as $fty).min(9.0).is_sign_positive());
                assert_eq!((-0.0 as $fty).min(9.0), -0.0);
                assert!((-0.0 as $fty).min(9.0).is_sign_negative());
                assert_eq!((-0.0 as $fty).min(-9.0), -9.0);
                // Infinities behave as ordinary extreme values.
                assert_eq!(($inf as $fty).min(9.0), 9.0);
                assert_eq!((9.0 as $fty).min($inf), 9.0);
                assert_eq!(($inf as $fty).min(-9.0), -9.0);
                assert_eq!((-9.0 as $fty).min($inf), -9.0);
                assert_eq!(($neginf as $fty).min(9.0), $neginf);
                assert_eq!((9.0 as $fty).min($neginf), $neginf);
                assert_eq!(($neginf as $fty).min(-9.0), $neginf);
                assert_eq!((-9.0 as $fty).min($neginf), $neginf);
                // A single NaN operand is ignored, regardless of position.
                assert_eq!(($nan as $fty).min(9.0), 9.0);
                assert_eq!(($nan as $fty).min(-9.0), -9.0);
                assert_eq!((9.0 as $fty).min($nan), 9.0);
                assert_eq!((-9.0 as $fty).min($nan), -9.0);
                assert!(($nan as $fty).min($nan).is_nan());
            }
            /// Mirror of `min` for `max`, with the same NaN-ignoring and
            /// signed-zero expectations.
            #[test]
            fn max() {
                assert_eq!((0.0 as $fty).max(0.0), 0.0);
                assert!((0.0 as $fty).max(0.0).is_sign_positive());
                assert_eq!((-0.0 as $fty).max(-0.0), -0.0);
                assert!((-0.0 as $fty).max(-0.0).is_sign_negative());
                assert_eq!((9.0 as $fty).max(9.0), 9.0);
                assert_eq!((-9.0 as $fty).max(0.0), 0.0);
                assert!((-9.0 as $fty).max(0.0).is_sign_positive());
                assert_eq!((-9.0 as $fty).max(-0.0), -0.0);
                assert!((-9.0 as $fty).max(-0.0).is_sign_negative());
                assert_eq!((0.0 as $fty).max(9.0), 9.0);
                assert_eq!((0.0 as $fty).max(-9.0), 0.0);
                assert!((0.0 as $fty).max(-9.0).is_sign_positive());
                assert_eq!((-0.0 as $fty).max(-9.0), -0.0);
                assert!((-0.0 as $fty).max(-9.0).is_sign_negative());
                assert_eq!(($inf as $fty).max(9.0), $inf);
                assert_eq!((9.0 as $fty).max($inf), $inf);
                assert_eq!(($inf as $fty).max(-9.0), $inf);
                assert_eq!((-9.0 as $fty).max($inf), $inf);
                assert_eq!(($neginf as $fty).max(9.0), 9.0);
                assert_eq!((9.0 as $fty).max($neginf), 9.0);
                assert_eq!(($neginf as $fty).max(-9.0), -9.0);
                assert_eq!((-9.0 as $fty).max($neginf), -9.0);
                assert_eq!(($nan as $fty).max(9.0), 9.0);
                assert_eq!(($nan as $fty).max(-9.0), -9.0);
                assert_eq!((9.0 as $fty).max($nan), 9.0);
                assert_eq!((-9.0 as $fty).max($nan), -9.0);
                assert!(($nan as $fty).max($nan).is_nan());
            }
            /// Special-value behavior of `rem_euclid`: an infinite dividend
            /// or any NaN operand yields NaN; a finite value mod infinity
            /// is the value itself.
            #[test]
            fn rem_euclid() {
                let a: $fty = 42.0;
                assert!($inf.rem_euclid(a).is_nan());
                assert_eq!(a.rem_euclid($inf), a);
                assert!(a.rem_euclid($nan).is_nan());
                assert!($inf.rem_euclid($inf).is_nan());
                assert!($inf.rem_euclid($nan).is_nan());
                assert!($nan.rem_euclid($inf).is_nan());
            }
            /// Special-value behavior of `div_euclid`: a finite value
            /// divided by infinity is 0; NaN anywhere propagates.
            #[test]
            fn div_euclid() {
                let a: $fty = 42.0;
                assert_eq!(a.div_euclid($inf), 0.0);
                assert!(a.div_euclid($nan).is_nan());
                assert!($inf.div_euclid($inf).is_nan());
                assert!($inf.div_euclid($nan).is_nan());
                assert!($nan.div_euclid($inf).is_nan());
            }
        }
    };
}
+
// Instantiate the shared float tests for both primitive float types; the
// module name doubles as the test-path prefix (e.g. `f32::min`).
test_float!(f32, f32, f32::INFINITY, f32::NEG_INFINITY, f32::NAN);
test_float!(f64, f64, f64::INFINITY, f64::NEG_INFINITY, f64::NAN);
--- /dev/null
#[test]
fn test_nan() {
    // NaN must render as the literal string "NaN" through Display and
    // through both scientific-notation format specifiers.
    let expected = "NaN";
    assert_eq!(f64::NAN.to_string(), expected);
    assert_eq!(format!("{:e}", f64::NAN), expected);
    assert_eq!(format!("{:E}", f64::NAN), expected);
}
--- /dev/null
// Instantiate the shared unsigned-integer test suite for u16.
uint_module!(u16, u16);
--- /dev/null
// Instantiate the shared unsigned-integer test suite for u32.
uint_module!(u32, u32);
--- /dev/null
// Instantiate the shared unsigned-integer test suite for u64.
uint_module!(u64, u64);
--- /dev/null
// Instantiate the shared unsigned-integer test suite for u8.
uint_module!(u8, u8);
--- /dev/null
/// Generates the shared test suite for an unsigned integer type.
///
/// `$T` is the primitive under test. `$T_i` is kept only for backward
/// compatibility with the existing `uint_module!(uN, uN)` call sites: it
/// previously named the deprecated `core::$T_i` constant module (whose
/// `MIN`/`MAX` were glob-imported); the associated constants `$T::MIN` /
/// `$T::MAX` are used instead now, so the parameter is unused.
macro_rules! uint_module {
    ($T:ident, $T_i:ident) => {
        #[cfg(test)]
        mod tests {
            use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
            use std::str::FromStr;

            use crate::num;

            /// MIN/MAX really are the extremes: one past MAX wraps to 0.
            #[test]
            fn test_overflows() {
                assert!($T::MAX > 0);
                assert!($T::MIN <= 0);
                assert!(($T::MIN + $T::MAX).wrapping_add(1) == 0);
            }

            /// Smoke test of the shared numeric helpers in `crate::num`.
            #[test]
            fn test_num() {
                num::test_num(10 as $T, 2 as $T);
            }

            /// The operator traits' methods agree with known bit patterns.
            #[test]
            fn test_bitwise_operators() {
                assert!(0b1110 as $T == (0b1100 as $T).bitor(0b1010 as $T));
                assert!(0b1000 as $T == (0b1100 as $T).bitand(0b1010 as $T));
                assert!(0b0110 as $T == (0b1100 as $T).bitxor(0b1010 as $T));
                assert!(0b1110 as $T == (0b0111 as $T).shl(1));
                assert!(0b0111 as $T == (0b1110 as $T).shr(1));
                // For an unsigned type, !x == MAX - x.
                assert!($T::MAX - (0b1011 as $T) == (0b1011 as $T).not());
            }

            // Shared bit patterns used by the tests below.
            const A: $T = 0b0101100;
            const B: $T = 0b0100001;
            const C: $T = 0b1111001;

            // All-zeros and all-ones values.
            const _0: $T = 0;
            const _1: $T = !0;

            #[test]
            fn test_count_ones() {
                assert!(A.count_ones() == 3);
                assert!(B.count_ones() == 2);
                assert!(C.count_ones() == 5);
            }

            #[test]
            fn test_count_zeros() {
                assert!(A.count_zeros() == $T::BITS - 3);
                assert!(B.count_zeros() == $T::BITS - 2);
                assert!(C.count_zeros() == $T::BITS - 5);
            }

            #[test]
            fn test_leading_trailing_ones() {
                let a: $T = 0b0101_1111;
                assert_eq!(a.trailing_ones(), 5);
                assert_eq!((!a).leading_ones(), $T::BITS - 7);

                assert_eq!(a.reverse_bits().leading_ones(), 5);

                assert_eq!(_1.leading_ones(), $T::BITS);
                assert_eq!(_1.trailing_ones(), $T::BITS);

                assert_eq!((_1 << 1).trailing_ones(), 0);
                assert_eq!((_1 >> 1).leading_ones(), 0);

                assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
                assert_eq!((_1 >> 1).trailing_ones(), $T::BITS - 1);

                assert_eq!(_0.leading_ones(), 0);
                assert_eq!(_0.trailing_ones(), 0);

                let x: $T = 0b0010_1100;
                assert_eq!(x.leading_ones(), 0);
                assert_eq!(x.trailing_ones(), 0);
            }

            #[test]
            fn test_rotate() {
                assert_eq!(A.rotate_left(6).rotate_right(2).rotate_right(4), A);
                assert_eq!(B.rotate_left(3).rotate_left(2).rotate_right(5), B);
                assert_eq!(C.rotate_left(6).rotate_right(2).rotate_right(4), C);

                // Rotating these should make no difference
                //
                // We use 124 to ensure that overlong rotate amounts (larger
                // than the type's bit width) do not cause undefined
                // behaviour. See #10183.
                assert_eq!(_0.rotate_left(124), _0);
                assert_eq!(_1.rotate_left(124), _1);
                assert_eq!(_0.rotate_right(124), _0);
                assert_eq!(_1.rotate_right(124), _1);

                // Rotating by 0 should have no effect
                assert_eq!(A.rotate_left(0), A);
                assert_eq!(B.rotate_left(0), B);
                assert_eq!(C.rotate_left(0), C);
                // Rotating by a multiple of word size should also have no effect
                assert_eq!(A.rotate_left(64), A);
                assert_eq!(B.rotate_left(64), B);
                assert_eq!(C.rotate_left(64), C);
            }

            #[test]
            fn test_swap_bytes() {
                // swap_bytes is an involution.
                assert_eq!(A.swap_bytes().swap_bytes(), A);
                assert_eq!(B.swap_bytes().swap_bytes(), B);
                assert_eq!(C.swap_bytes().swap_bytes(), C);

                // Swapping these should make no difference
                assert_eq!(_0.swap_bytes(), _0);
                assert_eq!(_1.swap_bytes(), _1);
            }

            #[test]
            fn test_reverse_bits() {
                // reverse_bits is an involution.
                assert_eq!(A.reverse_bits().reverse_bits(), A);
                assert_eq!(B.reverse_bits().reverse_bits(), B);
                assert_eq!(C.reverse_bits().reverse_bits(), C);

                // Reversing these should make no difference
                assert_eq!(_0.reverse_bits(), _0);
                assert_eq!(_1.reverse_bits(), _1);
            }

            /// Little-endian conversions round-trip; all-zeros and all-ones
            /// are endianness-invariant fixed points.
            #[test]
            fn test_le() {
                assert_eq!($T::from_le(A.to_le()), A);
                assert_eq!($T::from_le(B.to_le()), B);
                assert_eq!($T::from_le(C.to_le()), C);
                assert_eq!($T::from_le(_0), _0);
                assert_eq!($T::from_le(_1), _1);
                assert_eq!(_0.to_le(), _0);
                assert_eq!(_1.to_le(), _1);
            }

            /// Big-endian counterpart of `test_le`.
            #[test]
            fn test_be() {
                assert_eq!($T::from_be(A.to_be()), A);
                assert_eq!($T::from_be(B.to_be()), B);
                assert_eq!($T::from_be(C.to_be()), C);
                assert_eq!($T::from_be(_0), _0);
                assert_eq!($T::from_be(_1), _1);
                assert_eq!(_0.to_be(), _0);
                assert_eq!(_1.to_be(), _1);
            }

            /// checked_div returns None on division by zero.
            #[test]
            fn test_unsigned_checked_div() {
                assert!((10 as $T).checked_div(2) == Some(5));
                assert!((5 as $T).checked_div(0) == None);
            }

            // Helper: parse and discard the error, for terser asserts.
            fn from_str<T: FromStr>(t: &str) -> Option<T> {
                FromStr::from_str(t).ok()
            }

            #[test]
            pub fn test_from_str() {
                assert_eq!(from_str::<$T>("0"), Some(0 as $T));
                assert_eq!(from_str::<$T>("3"), Some(3 as $T));
                assert_eq!(from_str::<$T>("10"), Some(10 as $T));
                assert_eq!(from_str::<u32>("123456789"), Some(123456789 as u32));
                // Leading zeros are accepted.
                assert_eq!(from_str::<$T>("00100"), Some(100 as $T));

                // Empty, whitespace-only and non-numeric inputs all fail.
                assert_eq!(from_str::<$T>(""), None);
                assert_eq!(from_str::<$T>(" "), None);
                assert_eq!(from_str::<$T>("x"), None);
            }

            #[test]
            pub fn test_parse_bytes() {
                assert_eq!($T::from_str_radix("123", 10), Ok(123 as $T));
                assert_eq!($T::from_str_radix("1001", 2), Ok(9 as $T));
                assert_eq!($T::from_str_radix("123", 8), Ok(83 as $T));
                assert_eq!(u16::from_str_radix("123", 16), Ok(291 as u16));
                assert_eq!(u16::from_str_radix("ffff", 16), Ok(65535 as u16));
                assert_eq!($T::from_str_radix("z", 36), Ok(35 as $T));

                // Digits outside the radix are rejected.
                assert_eq!($T::from_str_radix("Z", 10).ok(), None::<$T>);
                assert_eq!($T::from_str_radix("_", 2).ok(), None::<$T>);
            }

            #[test]
            fn test_pow() {
                let mut r = 2 as $T;
                assert_eq!(r.pow(2), 4 as $T);
                assert_eq!(r.pow(0), 1 as $T);
                assert_eq!(r.wrapping_pow(2), 4 as $T);
                assert_eq!(r.wrapping_pow(0), 1 as $T);
                assert_eq!(r.checked_pow(2), Some(4 as $T));
                assert_eq!(r.checked_pow(0), Some(1 as $T));
                assert_eq!(r.overflowing_pow(2), (4 as $T, false));
                assert_eq!(r.overflowing_pow(0), (1 as $T, false));
                assert_eq!(r.saturating_pow(2), 4 as $T);
                assert_eq!(r.saturating_pow(0), 1 as $T);

                r = $T::MAX;
                // use `^` to represent .pow() with no overflow.
                // if itest::MAX == 2^j-1, then itest is a `j` bit int,
                // so that `itest::MAX*itest::MAX == 2^(2*j)-2^(j+1)+1`,
                // thus the overflowing result is exactly 1.
                assert_eq!(r.wrapping_pow(2), 1 as $T);
                assert_eq!(r.checked_pow(2), None);
                assert_eq!(r.overflowing_pow(2), (1 as $T, true));
                assert_eq!(r.saturating_pow(2), $T::MAX);
            }
        }
    };
}
--- /dev/null
+use core::num::Wrapping;
+
/// Checks a binary (or unary) operator on `Wrapping` values against an
/// expected result, covering every value/reference operand combination.
macro_rules! wrapping_operation {
    // Binary form: exercise T op T, T op &T, &T op T and &T op &T.
    ($expected:expr, $a:ident $op:tt $b:expr) => {
        assert_eq!($expected, $a $op $b);
        assert_eq!($expected, $a $op &$b);
        assert_eq!($expected, &$a $op $b);
        assert_eq!($expected, &$a $op &$b);
    };
    // Unary form: exercise op T and op &T.
    ($expected:expr, $op:tt $operand:expr) => {
        assert_eq!($expected, $op $operand);
        assert_eq!($expected, $op &$operand);
    };
}
+
/// Checks a compound-assignment operator on `Wrapping` values: the operator
/// is applied to copies of the left-hand side with both a by-value and a
/// by-reference right-hand side, and each copy must equal the expectation.
macro_rules! wrapping_assignment {
    ($expected:expr, $target:ident $op:tt $rhs:expr) => {
        let mut by_value = $target;
        by_value $op $rhs;
        assert_eq!($expected, by_value);

        let mut by_ref = $target;
        by_ref $op &$rhs;
        assert_eq!($expected, by_ref);
    };
}
+
/// Generates one `#[test]` per integer type that drives every arithmetic,
/// bitwise and shift operator (plus the assignment forms) through
/// `Wrapping<$type>` at the type's boundary values.
///
/// NOTE: this previously tried to name the function `wrapping_$type`, but
/// `macro_rules!` cannot concatenate tokens into a new identifier, so the
/// expansion never compiled. The type ident itself now names the test
/// (e.g. the `i8` instantiation produces `fn i8()`), which keeps the
/// existing `wrapping_test!(i8, i8::MIN, i8::MAX)` call sites working.
macro_rules! wrapping_test {
    ($type:ident, $min:expr, $max:expr) => {
        #[test]
        fn $type() {
            let zero: Wrapping<$type> = Wrapping(0);
            let one: Wrapping<$type> = Wrapping(1);
            let min: Wrapping<$type> = Wrapping($min);
            let max: Wrapping<$type> = Wrapping($max);

            // Arithmetic wraps around at the type boundaries.
            wrapping_operation!(min, max + one);
            wrapping_assignment!(min, max += one);
            wrapping_operation!(max, min - one);
            wrapping_assignment!(max, min -= one);
            wrapping_operation!(max, max * one);
            wrapping_assignment!(max, max *= one);
            wrapping_operation!(max, max / one);
            wrapping_assignment!(max, max /= one);
            wrapping_operation!(zero, max % one);
            wrapping_assignment!(zero, max %= one);
            // Bitwise operators.
            wrapping_operation!(zero, zero & max);
            wrapping_assignment!(zero, zero &= max);
            wrapping_operation!(max, zero | max);
            wrapping_assignment!(max, zero |= max);
            wrapping_operation!(zero, max ^ max);
            wrapping_assignment!(zero, max ^= max);
            // Shifts: zero stays zero for any shift amount.
            wrapping_operation!(zero, zero << 1usize);
            wrapping_assignment!(zero, zero <<= 1usize);
            wrapping_operation!(zero, zero >> 1usize);
            wrapping_assignment!(zero, zero >>= 1usize);
            // Unary operators: -0 == 0 and !MIN == MAX for every type.
            wrapping_operation!(zero, -zero);
            wrapping_operation!(max, !min);
        }
    };
}
+
// One instantiation per primitive integer type, exercising wrapping
// arithmetic at the type's boundary values. The 128-bit instantiations are
// cfg'd out on Emscripten — presumably because that target historically
// lacked 128-bit integer support (TODO confirm the cfg is still needed).
wrapping_test!(i8, i8::MIN, i8::MAX);
wrapping_test!(i16, i16::MIN, i16::MAX);
wrapping_test!(i32, i32::MIN, i32::MAX);
wrapping_test!(i64, i64::MIN, i64::MAX);
#[cfg(not(target_os = "emscripten"))]
wrapping_test!(i128, i128::MIN, i128::MAX);
wrapping_test!(isize, isize::MIN, isize::MAX);
wrapping_test!(u8, u8::MIN, u8::MAX);
wrapping_test!(u16, u16::MIN, u16::MAX);
wrapping_test!(u32, u32::MIN, u32::MAX);
wrapping_test!(u64, u64::MIN, u64::MAX);
#[cfg(not(target_os = "emscripten"))]
wrapping_test!(u128, u128::MIN, u128::MAX);
wrapping_test!(usize, usize::MIN, usize::MAX);
--- /dev/null
+use core::ops::{Bound, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo};
+
+// Test the Range structs and syntax.
+
#[test]
fn test_range() {
    // `Range` iterates the half-open interval [start, end).
    let mut seen = 0;
    for (offset, value) in (Range { start: 2, end: 10 }).enumerate() {
        assert_eq!(value, offset + 2);
        assert!((2..10).contains(&value));
        seen += 1;
    }
    // Exactly end - start = 8 elements are produced.
    assert_eq!(seen, 8);
}
+
#[test]
fn test_range_from() {
    // `RangeFrom` is an endless iterator; bound it with `take`.
    let mut seen = 0;
    for (offset, value) in (RangeFrom { start: 2 }).take(10).enumerate() {
        assert_eq!(value, offset + 2);
        assert!((2..12).contains(&value));
        seen += 1;
    }
    assert_eq!(seen, 10);
}
+
#[test]
fn test_range_to() {
    // `RangeTo` is not an iterator; constructing the literal is the test.
    let _range: RangeTo<i32> = RangeTo { end: 42 };
}
+
#[test]
fn test_full_range() {
    // `RangeFull` is a unit struct; constructing it is the whole test.
    let _full: RangeFull = RangeFull;
}
+
#[test]
fn test_range_inclusive() {
    // Both endpoints are yielded, then the iterator is exhausted.
    let mut iter = 1i8..=2;
    assert_eq!(iter.next(), Some(1));
    assert_eq!(iter.next(), Some(2));
    assert_eq!(iter.next(), None);

    // An inclusive range ending at the type's MAX must terminate cleanly.
    iter = 127i8..=127;
    assert_eq!(iter.next(), Some(127));
    assert_eq!(iter.next(), None);

    // Likewise from the back when starting at MIN.
    iter = -128i8..=-128;
    assert_eq!(iter.next_back(), Some(-128));
    assert_eq!(iter.next_back(), None);

    // degenerate: a backwards range is empty from the start.
    iter = 1..=-1;
    assert_eq!(iter.size_hint(), (0, Some(0)));
    assert_eq!(iter.next(), None);
}
+
#[test]
fn test_range_is_empty() {
    // Half-open ranges are empty when start >= end. Since -0.0 == 0.0, the
    // range -0.0..0.0 has equal endpoints and is therefore empty.
    assert!(!(0.0..10.0).is_empty());
    assert!((-0.0..0.0).is_empty());
    assert!((10.0..0.0).is_empty());

    // A NaN endpoint makes the ordering comparison fail, which counts as
    // empty in both directions.
    assert!(!(f32::NEG_INFINITY..f32::INFINITY).is_empty());
    assert!((f32::EPSILON..f32::NAN).is_empty());
    assert!((f32::NAN..f32::EPSILON).is_empty());
    assert!((f32::NAN..f32::NAN).is_empty());

    // Inclusive ranges are empty only when start > end, so equal endpoints
    // (including -0.0..=0.0) are non-empty.
    assert!(!(0.0..=10.0).is_empty());
    assert!(!(-0.0..=0.0).is_empty());
    assert!((10.0..=0.0).is_empty());

    assert!(!(f32::NEG_INFINITY..=f32::INFINITY).is_empty());
    assert!((f32::EPSILON..=f32::NAN).is_empty());
    assert!((f32::NAN..=f32::EPSILON).is_empty());
    assert!((f32::NAN..=f32::NAN).is_empty());
}
+
#[test]
fn test_bound_cloned_unbounded() {
    // Cloning through an `Unbounded` bound is a no-op.
    let bound: Bound<&u32> = Bound::Unbounded;
    assert_eq!(bound.cloned(), Bound::Unbounded);
}
+
#[test]
fn test_bound_cloned_included() {
    // `cloned` turns Bound<&T> into Bound<T> by cloning the referent.
    let bound = Bound::Included(&3);
    assert_eq!(bound.cloned(), Bound::Included(3));
}
+
#[test]
fn test_bound_cloned_excluded() {
    // Same as the Included case: the referent is cloned out of the bound.
    let bound = Bound::Excluded(&3);
    assert_eq!(bound.cloned(), Bound::Excluded(3));
}
+
// NOTE: this test exercises the *syntax* of ranges; the exact source forms
// below are the point, so they must not be "simplified".
#[test]
#[allow(unused_comparisons)]
#[allow(unused_mut)]
fn test_range_syntax() {
    // Plain `a..b` directly in a for loop.
    let mut count = 0;
    for i in 0_usize..10 {
        assert!(i >= 0 && i < 10);
        count += i;
    }
    assert_eq!(count, 45);

    // A range stored in a binding is still iterable.
    let mut count = 0;
    let mut range = 0_usize..10;
    for i in range {
        assert!(i >= 0 && i < 10);
        count += i;
    }
    assert_eq!(count, 45);

    // `a..` (RangeFrom) is endless; bound it with take().
    let mut count = 0;
    let mut rf = 3_usize..;
    for i in rf.take(10) {
        assert!(i >= 3 && i < 13);
        count += i;
    }
    assert_eq!(count, 75);

    // `..` binds looser than arithmetic: this parses as 0..(4 + 4 - 3).
    let _ = 0_usize..4 + 4 - 3;

    // A function call is a valid range endpoint.
    fn foo() -> isize {
        42
    }
    let _ = 0..foo();

    let _ = { &42..&100 }; // references to literals are OK
    let _ = ..42_usize;

    // Test we can use two different types with a common supertype.
    let x = &42;
    {
        let y = 42;
        let _ = x..&y;
    }
}
+
// Regression-style syntax test: `..1` and `..` must parse when they appear
// immediately after the `return` keyword.
#[test]
#[allow(dead_code)]
fn test_range_syntax_in_return_statement() {
    fn return_range_to() -> RangeTo<i32> {
        return ..1;
    }
    fn return_full_range() -> RangeFull {
        return ..;
    }
    // Not much to test.
}
--- /dev/null
+use core::cell::Cell;
+use core::clone::Clone;
+use core::mem;
+use core::ops::DerefMut;
+use core::option::*;
+
#[test]
fn test_get_ptr() {
    // Unwrapping an Option<Box<_>> must hand back the same heap allocation
    // rather than copying it: the pointee address is identical before and
    // after the Some/unwrap round-trip.
    //
    // Rewritten from the removed `box 0` placement syntax to `Box::new`,
    // and from `mem::transmute` to a safe `&T -> *const T` coercion, which
    // makes the whole test safe code.
    let x: Box<isize> = Box::new(0);
    let addr_x: *const isize = &*x;
    let opt = Some(x);
    let y = opt.unwrap();
    let addr_y: *const isize = &*y;
    assert_eq!(addr_x, addr_y);
}
+
#[test]
fn test_get_str() {
    // Unwrapping must move the String, not reallocate its buffer: the
    // heap pointer is unchanged across the Some/unwrap round-trip.
    let original = "test".to_string();
    let addr_before = original.as_ptr();
    let recovered = Some(original).unwrap();
    let addr_after = recovered.as_ptr();
    assert_eq!(addr_before, addr_after);
}
+
#[test]
fn test_get_resource() {
    use std::cell::RefCell;
    use std::rc::Rc;

    // A guard that bumps a shared counter exactly once when dropped.
    struct Guard {
        counter: Rc<RefCell<isize>>,
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            *self.counter.borrow_mut() += 1;
        }
    }

    let drops = Rc::new(RefCell::new(0));
    {
        // Moving the guard through Some(..).unwrap() must neither drop it
        // early nor duplicate it.
        let _guard = Some(Guard { counter: drops.clone() }).unwrap();
    }
    // Exactly one drop has run once the scope ends.
    assert_eq!(*drops.borrow(), 1);
}
+
#[test]
fn test_option_dance() {
    // Iterating Some(()) runs the loop body exactly once, during which the
    // payload is taken out of `source`.
    let trigger = Some(());
    let mut source = Some(5);
    let mut extracted = 0;
    for _ in trigger {
        extracted = source.take().unwrap();
    }
    assert_eq!(extracted, 5);
    assert!(source.is_none());
}
+
#[test]
#[should_panic]
fn test_option_too_much_dance() {
    // The first take() empties the Option, so the second unwrap must panic.
    struct A;
    let mut opt = Some(A);
    let _first = opt.take().unwrap();
    let _second = opt.take().unwrap();
}
+
#[test]
fn test_and() {
    // `and` yields its argument only when the receiver is Some.
    let some: Option<isize> = Some(1);
    assert_eq!(some.and(Some(2)), Some(2));
    assert_eq!(some.and(None::<isize>), None);

    let none: Option<isize> = None;
    assert_eq!(none.and(Some(2)), None);
    assert_eq!(none.and(None::<isize>), None);
}
+
#[test]
fn test_and_then() {
    // `and_then` chains an Option-returning computation over the payload.
    let some: Option<isize> = Some(1);
    assert_eq!(some.and_then(|v| Some(v + 1)), Some(2));
    assert_eq!(some.and_then(|_| None::<isize>), None);

    let none: Option<isize> = None;
    assert_eq!(none.and_then(|v| Some(v + 1)), None);
    assert_eq!(none.and_then(|_| None::<isize>), None);
}
+
#[test]
fn test_or() {
    // `or` keeps the receiver when it is Some, else takes the argument.
    let some: Option<isize> = Some(1);
    assert_eq!(some.or(Some(2)), Some(1));
    assert_eq!(some.or(None), Some(1));

    let none: Option<isize> = None;
    assert_eq!(none.or(Some(2)), Some(2));
    assert_eq!(none.or(None), None);
}
+
#[test]
fn test_or_else() {
    // Lazy variant of `or`: the closure supplies the fallback.
    let some: Option<isize> = Some(1);
    assert_eq!(some.or_else(|| Some(2)), Some(1));
    assert_eq!(some.or_else(|| None), Some(1));

    let none: Option<isize> = None;
    assert_eq!(none.or_else(|| Some(2)), Some(2));
    assert_eq!(none.or_else(|| None), None);
}
+
+// unwrap() on Some returns the contained value, for Copy and owned types alike.
+#[test]
+fn test_unwrap() {
+ assert_eq!(Some(1).unwrap(), 1);
+ let s = Some("hello".to_string()).unwrap();
+ assert_eq!(s, "hello");
+}
+
+// unwrap() on None must panic (Copy payload type).
+#[test]
+#[should_panic]
+fn test_unwrap_panic1() {
+ let x: Option<isize> = None;
+ x.unwrap();
+}
+
+// unwrap() on None must panic (non-Copy payload type).
+#[test]
+#[should_panic]
+fn test_unwrap_panic2() {
+ let x: Option<String> = None;
+ x.unwrap();
+}
+
+// unwrap_or: eager default used only for None.
+#[test]
+fn test_unwrap_or() {
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.unwrap_or(2), 1);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.unwrap_or(2), 2);
+}
+
+// unwrap_or_else: lazy default closure used only for None.
+#[test]
+fn test_unwrap_or_else() {
+ let x: Option<isize> = Some(1);
+ assert_eq!(x.unwrap_or_else(|| 2), 1);
+
+ let x: Option<isize> = None;
+ assert_eq!(x.unwrap_or_else(|| 2), 2);
+}
+
+// Option::iter yields the value by reference exactly once, with exact
+// size_hint before and after; &Option also IntoIterates to the same item.
+#[test]
+fn test_iter() {
+ let val = 5;
+
+ let x = Some(val);
+ let mut it = x.iter();
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next(), Some(&val));
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+
+ let mut it = (&x).into_iter();
+ assert_eq!(it.next(), Some(&val));
+}
+
+// iter_mut hands out a mutable reference to the contained value; writing
+// through it updates the Option in place. &mut Option IntoIterates likewise.
+#[test]
+fn test_mut_iter() {
+ let mut val = 5;
+ let new_val = 11;
+
+ let mut x = Some(val);
+ {
+ let mut it = x.iter_mut();
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+
+ match it.next() {
+ Some(interior) => {
+ assert_eq!(*interior, val);
+ *interior = new_val;
+ }
+ None => assert!(false),
+ }
+
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+ }
+ assert_eq!(x, Some(new_val));
+
+ let mut y = Some(val);
+ let mut it = (&mut y).into_iter();
+ assert_eq!(it.next(), Some(&mut val));
+}
+
+// PartialOrd on Option<f64>: NaN compares incomparable either way,
+// and None sorts before any Some.
+#[test]
+fn test_ord() {
+ let small = Some(1.0f64);
+ let big = Some(5.0f64);
+ let nan = Some(0.0f64 / 0.0);
+ assert!(!(nan < big));
+ assert!(!(nan > big));
+ assert!(small < big);
+ assert!(None < big);
+ assert!(big > None);
+}
+
+// collect() into Option<Vec<_>> short-circuits on the first None.
+// NOTE: uses the unstable `box` expression syntax (nightly-only test crate).
+#[test]
+fn test_collect() {
+ let v: Option<Vec<isize>> = (0..0).map(|_| Some(0)).collect();
+ assert!(v == Some(vec![]));
+
+ let v: Option<Vec<isize>> = (0..3).map(|x| Some(x)).collect();
+ assert!(v == Some(vec![0, 1, 2]));
+
+ let v: Option<Vec<isize>> = (0..3).map(|x| if x > 1 { None } else { Some(x) }).collect();
+ assert!(v == None);
+
+ // test that it does not take more elements than it needs
+ // (the third closure would panic if evaluated)
+ let mut functions: [Box<dyn Fn() -> Option<()>>; 3] =
+ [box || Some(()), box || None, box || panic!()];
+
+ let v: Option<Vec<()>> = functions.iter_mut().map(|f| (*f)()).collect();
+
+ assert!(v == None);
+}
+
+// Option::copied peels one layer of reference off a Copy payload.
+#[test]
+fn test_copied() {
+ let val = 1;
+ let val_ref = &val;
+ let opt_none: Option<&'static u32> = None;
+ let opt_ref = Some(&val);
+ let opt_ref_ref = Some(&val_ref);
+
+ // None works
+ assert_eq!(opt_none.clone(), None);
+ assert_eq!(opt_none.copied(), None);
+
+ // Immutable ref works
+ assert_eq!(opt_ref.clone(), Some(&val));
+ assert_eq!(opt_ref.copied(), Some(1));
+
+ // Double Immutable ref works
+ assert_eq!(opt_ref_ref.clone(), Some(&val_ref));
+ assert_eq!(opt_ref_ref.clone().copied(), Some(&val));
+ assert_eq!(opt_ref_ref.copied().copied(), Some(1));
+}
+
+// Option::cloned mirrors copied() but goes through Clone.
+#[test]
+fn test_cloned() {
+ let val = 1;
+ let val_ref = &val;
+ let opt_none: Option<&'static u32> = None;
+ let opt_ref = Some(&val);
+ let opt_ref_ref = Some(&val_ref);
+
+ // None works
+ assert_eq!(opt_none.clone(), None);
+ assert_eq!(opt_none.cloned(), None);
+
+ // Immutable ref works
+ assert_eq!(opt_ref.clone(), Some(&val));
+ assert_eq!(opt_ref.cloned(), Some(1));
+
+ // Double Immutable ref works
+ assert_eq!(opt_ref_ref.clone(), Some(&val_ref));
+ assert_eq!(opt_ref_ref.clone().cloned(), Some(&val));
+ assert_eq!(opt_ref_ref.cloned().cloned(), Some(1));
+}
+
+// `?` on Option in Option- and Result-returning functions.
+// NOTE: relies on the unstable `NoneError` / Try interop (nightly-era API).
+#[test]
+fn test_try() {
+ fn try_option_some() -> Option<u8> {
+ let val = Some(1)?;
+ Some(val)
+ }
+ assert_eq!(try_option_some(), Some(1));
+
+ fn try_option_none() -> Option<u8> {
+ let val = None?;
+ Some(val)
+ }
+ assert_eq!(try_option_none(), None);
+
+ fn try_option_ok() -> Result<u8, NoneError> {
+ let val = Some(1)?;
+ Ok(val)
+ }
+ assert_eq!(try_option_ok(), Ok(1));
+
+ fn try_option_err() -> Result<u8, NoneError> {
+ let val = None?;
+ Ok(val)
+ }
+ assert_eq!(try_option_err(), Err(NoneError));
+}
+
+// as_deref: Option<T: Deref> viewed as Option<&T::Target>.
+#[test]
+fn test_option_as_deref() {
+ // Some: &Option<T: Deref>::Some(T) -> Option<&T::Deref::Target>::Some(&*T)
+ let ref_option = &Some(&42);
+ assert_eq!(ref_option.as_deref(), Some(&42));
+
+ let ref_option = &Some(String::from("a result"));
+ assert_eq!(ref_option.as_deref(), Some("a result"));
+
+ let ref_option = &Some(vec![1, 2, 3, 4, 5]);
+ assert_eq!(ref_option.as_deref(), Some([1, 2, 3, 4, 5].as_slice()));
+
+ // None: &Option<T: Deref>>::None -> None
+ let ref_option: &Option<&i32> = &None;
+ assert_eq!(ref_option.as_deref(), None);
+}
+
+// as_deref_mut: the mutable counterpart of as_deref.
+#[test]
+fn test_option_as_deref_mut() {
+ // Some: &mut Option<T: Deref>::Some(T) -> Option<&mut T::Deref::Target>::Some(&mut *T)
+ let mut val = 42;
+ let ref_option = &mut Some(&mut val);
+ assert_eq!(ref_option.as_deref_mut(), Some(&mut 42));
+
+ let ref_option = &mut Some(String::from("a result"));
+ assert_eq!(ref_option.as_deref_mut(), Some(String::from("a result").deref_mut()));
+
+ let ref_option = &mut Some(vec![1, 2, 3, 4, 5]);
+ assert_eq!(ref_option.as_deref_mut(), Some([1, 2, 3, 4, 5].as_mut_slice()));
+
+ // None: &mut Option<T: Deref>>::None -> None
+ let ref_option: &mut Option<&mut i32> = &mut None;
+ assert_eq!(ref_option.as_deref_mut(), None);
+}
+
+// replace() swaps in the new value and returns the previous contents.
+#[test]
+fn test_replace() {
+ let mut x = Some(2);
+ let old = x.replace(5);
+
+ assert_eq!(x, Some(5));
+ assert_eq!(old, Some(2));
+
+ let mut x = None;
+ let old = x.replace(3);
+
+ assert_eq!(x, Some(3));
+ assert_eq!(old, None);
+}
+
+// as_ref / is_some / is_none must be callable in const initializers.
+#[test]
+fn option_const() {
+ // test that the methods of `Option` are usable in a const context
+
+ const OPTION: Option<usize> = Some(32);
+
+ const REF: Option<&usize> = OPTION.as_ref();
+ assert_eq!(REF, Some(&32));
+
+ const IS_SOME: bool = OPTION.is_some();
+ assert!(IS_SOME);
+
+ const IS_NONE: bool = OPTION.is_none();
+ assert!(!IS_NONE);
+}
+
+// A hand-rolled unwrap via match must move the payload, not drop it twice:
+// the counter is decremented exactly once, by _c going out of scope.
+#[test]
+fn test_unwrap_drop() {
+ struct Dtor<'a> {
+ x: &'a Cell<isize>,
+ }
+
+ impl<'a> std::ops::Drop for Dtor<'a> {
+ fn drop(&mut self) {
+ self.x.set(self.x.get() - 1);
+ }
+ }
+
+ fn unwrap<T>(o: Option<T>) -> T {
+ match o {
+ Some(v) => v,
+ None => panic!(),
+ }
+ }
+
+ let x = &Cell::new(1);
+
+ {
+ let b = Some(Dtor { x });
+ let _c = unwrap(b);
+ }
+
+ assert_eq!(x.get(), 0);
+}
--- /dev/null
+use std::str::pattern::*;
+
+// This macro makes it easier
+// to write tests that do a series of iterations: it builds a searcher for
+// $needle in $haystack, calls each listed searcher method in order, converts
+// every result to `Step`, and compares the whole sequence against $result.
+macro_rules! search_asserts {
+ ($haystack:expr, $needle:expr, $testname:expr, [$($func:ident),*], $result:expr) => {
+ let mut searcher = $needle.into_searcher($haystack);
+ let arr = [$( Step::from(searcher.$func()) ),*];
+ assert_eq!(&arr[..], &$result, $testname);
+ }
+}
+
+/// Combined enum for the results of next() and next_match()/next_reject():
+/// Matches/Rejects mirror SearchStep::Match/Reject, InRange wraps the
+/// Some((start, end)) case of next_match()/next_reject(), Done covers both
+/// SearchStep::Done and None.
+#[derive(Debug, PartialEq, Eq)]
+enum Step {
+ // variant names purposely chosen to
+ // be the same length for easy alignment
+ Matches(usize, usize),
+ Rejects(usize, usize),
+ InRange(usize, usize),
+ Done,
+}
+
+use self::Step::*;
+
+// Lossless mapping from the Searcher::next() result type.
+impl From<SearchStep> for Step {
+ fn from(x: SearchStep) -> Self {
+ match x {
+ SearchStep::Match(a, b) => Matches(a, b),
+ SearchStep::Reject(a, b) => Rejects(a, b),
+ SearchStep::Done => Done,
+ }
+ }
+}
+
+// Mapping from the next_match()/next_reject() result type.
+impl From<Option<(usize, usize)>> for Step {
+ fn from(x: Option<(usize, usize)>) -> Self {
+ match x {
+ Some((a, b)) => InRange(a, b),
+ None => Done,
+ }
+ }
+}
+
+// FIXME(Manishearth) these tests focus on single-character searching (CharSearcher)
+// and on next()/next_match(), not next_reject(). This is because
+// the memchr changes make next_match() for single chars complex, but next_reject()
+// continues to use next() under the hood. We should add more test cases for all
+// of these, as well as tests for StrSearcher and higher level tests for str::find() (etc)
+
+// Full forward/backward next()/next_back() traces over ASCII, all-CJK,
+// and mixed-width haystacks; byte ranges must cover the string exactly.
+#[test]
+fn test_simple_iteration() {
+ search_asserts!(
+ "abcdeabcd",
+ 'a',
+ "forward iteration for ASCII string",
+ // a b c d e a b c d EOF
+ [next, next, next, next, next, next, next, next, next, next],
+ [
+ Matches(0, 1),
+ Rejects(1, 2),
+ Rejects(2, 3),
+ Rejects(3, 4),
+ Rejects(4, 5),
+ Matches(5, 6),
+ Rejects(6, 7),
+ Rejects(7, 8),
+ Rejects(8, 9),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "abcdeabcd",
+ 'a',
+ "reverse iteration for ASCII string",
+ // d c b a e d c b a EOF
+ [
+ next_back, next_back, next_back, next_back, next_back, next_back, next_back, next_back,
+ next_back, next_back
+ ],
+ [
+ Rejects(8, 9),
+ Rejects(7, 8),
+ Rejects(6, 7),
+ Matches(5, 6),
+ Rejects(4, 5),
+ Rejects(3, 4),
+ Rejects(2, 3),
+ Rejects(1, 2),
+ Matches(0, 1),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "我爱我的猫",
+ '我',
+ "forward iteration for Chinese string",
+ // 我 爱 我 的 猫 EOF (3 bytes per char)
+ [next, next, next, next, next, next],
+ [Matches(0, 3), Rejects(3, 6), Matches(6, 9), Rejects(9, 12), Rejects(12, 15), Done]
+ );
+
+ search_asserts!(
+ "我的猫说meow",
+ 'm',
+ "forward iteration for mixed string",
+ // 我 的 猫 说 m e o w EOF
+ [next, next, next, next, next, next, next, next, next],
+ [
+ Rejects(0, 3),
+ Rejects(3, 6),
+ Rejects(6, 9),
+ Rejects(9, 12),
+ Matches(12, 13),
+ Rejects(13, 14),
+ Rejects(14, 15),
+ Rejects(15, 16),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ "我的猫说meow",
+ '猫',
+ "reverse iteration for mixed string",
+ // w o e m 说 猫 的 我 EOF
+ [
+ next_back, next_back, next_back, next_back, next_back, next_back, next_back, next_back,
+ next_back
+ ],
+ [
+ Rejects(15, 16),
+ Rejects(14, 15),
+ Rejects(13, 14),
+ Rejects(12, 13),
+ Rejects(9, 12),
+ Matches(6, 9),
+ Rejects(3, 6),
+ Rejects(0, 3),
+ Done
+ ]
+ );
+}
+
+// next_match / next_match_back / next_reject / next_reject_back over an
+// ASCII haystack; ranges are byte offsets of each hit.
+#[test]
+fn test_simple_search() {
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_match for ASCII string",
+ [next_match, next_match, next_match, next_match],
+ [InRange(0, 1), InRange(5, 6), InRange(10, 11), Done]
+ );
+
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_match_back for ASCII string",
+ [next_match_back, next_match_back, next_match_back, next_match_back],
+ [InRange(10, 11), InRange(5, 6), InRange(0, 1), Done]
+ );
+
+ search_asserts!(
+ "abcdeab",
+ 'a',
+ "next_reject for ASCII string",
+ [next_reject, next_reject, next_match, next_reject, next_reject],
+ [InRange(1, 2), InRange(2, 3), InRange(5, 6), InRange(6, 7), Done]
+ );
+
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "next_reject_back for ASCII string",
+ [
+ next_reject_back,
+ next_reject_back,
+ next_match_back,
+ next_reject_back,
+ next_reject_back,
+ next_reject_back
+ ],
+ [
+ InRange(14, 15),
+ InRange(13, 14),
+ InRange(10, 11),
+ InRange(9, 10),
+ InRange(8, 9),
+ InRange(7, 8)
+ ]
+ );
+}
+
+// Á, 각, ก, 😁 all end in 0x81
+// 🁀, ᘀ do not end in 0x81 but contain the byte
+// ꁁ has 0x81 as its second and third bytes.
+//
+// The memchr-using implementation of next_match
+// and next_match_back temporarily violate
+// the property that the search is always on a unicode boundary,
+// which is fine as long as this never reaches next() or next_back().
+// So we test if next() is correct after each next_match() as well.
+// Mixed-width haystack exercising shared trailing/interior bytes (see above).
+const STRESS: &str = "Áa🁀bÁꁁfg😁각กᘀ각aÁ각ꁁก😁a";
+
+// Documents the byte index of every character in STRESS by searching for a
+// character ('x') that never occurs: every step is a Reject.
+#[test]
+fn test_stress_indices() {
+ // this isn't really a test, more of documentation on the indices of each character in the stresstest string
+
+ search_asserts!(
+ STRESS,
+ 'x',
+ "Indices of characters in stress test",
+ [
+ next, next, next, next, next, next, next, next, next, next, next, next, next, next,
+ next, next, next, next, next, next, next
+ ],
+ [
+ Rejects(0, 2), // Á
+ Rejects(2, 3), // a
+ Rejects(3, 7), // 🁀
+ Rejects(7, 8), // b
+ Rejects(8, 10), // Á
+ Rejects(10, 13), // ꁁ
+ Rejects(13, 14), // f
+ Rejects(14, 15), // g
+ Rejects(15, 19), // 😁
+ Rejects(19, 22), // 각
+ Rejects(22, 25), // ก
+ Rejects(25, 28), // ᘀ
+ Rejects(28, 31), // 각
+ Rejects(31, 32), // a
+ Rejects(32, 34), // Á
+ Rejects(34, 37), // 각
+ Rejects(37, 40), // ꁁ
+ Rejects(40, 43), // ก
+ Rejects(43, 47), // 😁
+ Rejects(47, 48), // a
+ Done
+ ]
+ );
+}
+
+// Forward next_match() over STRESS for needles whose UTF-8 bytes also occur
+// inside other characters; interleaved next() calls check the searcher is
+// back on a char boundary after each memchr-driven skip.
+#[test]
+fn test_forward_search_shared_bytes() {
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Forward search for two-byte Latin character",
+ [next_match, next_match, next_match, next_match],
+ [InRange(0, 2), InRange(8, 10), InRange(32, 34), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Forward search for two-byte Latin character; check if next() still works",
+ [next_match, next, next_match, next, next_match, next, next_match],
+ [
+ InRange(0, 2),
+ Rejects(2, 3),
+ InRange(8, 10),
+ Rejects(10, 13),
+ InRange(32, 34),
+ Rejects(34, 37),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Forward search for three-byte Hangul character",
+ [next_match, next, next_match, next_match, next_match],
+ [InRange(19, 22), Rejects(22, 25), InRange(28, 31), InRange(34, 37), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Forward search for three-byte Hangul character; check if next() still works",
+ [next_match, next, next_match, next, next_match, next, next_match],
+ [
+ InRange(19, 22),
+ Rejects(22, 25),
+ InRange(28, 31),
+ Rejects(31, 32),
+ InRange(34, 37),
+ Rejects(37, 40),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Forward search for three-byte Thai character",
+ [next_match, next, next_match, next, next_match],
+ [InRange(22, 25), Rejects(25, 28), InRange(40, 43), Rejects(43, 47), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Forward search for three-byte Thai character; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(22, 25), Rejects(25, 28), InRange(40, 43), Rejects(43, 47), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Forward search for four-byte emoji",
+ [next_match, next, next_match, next, next_match],
+ [InRange(15, 19), Rejects(19, 22), InRange(43, 47), Rejects(47, 48), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Forward search for four-byte emoji; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(15, 19), Rejects(19, 22), InRange(43, 47), Rejects(47, 48), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Forward search for three-byte Yi character with repeated bytes",
+ [next_match, next, next_match, next, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(40, 43), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Forward search for three-byte Yi character with repeated bytes; check if next() still works",
+ [next_match, next, next_match, next, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(40, 43), Done]
+ );
+}
+
+// Mirror of test_forward_search_shared_bytes using next_match_back() and
+// next_back(): same needles, hits reported from the end of STRESS.
+#[test]
+fn test_reverse_search_shared_bytes() {
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Reverse search for two-byte Latin character",
+ [next_match_back, next_match_back, next_match_back, next_match_back],
+ [InRange(32, 34), InRange(8, 10), InRange(0, 2), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Reverse search for two-byte Latin character; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back, next_back],
+ [InRange(32, 34), Rejects(31, 32), InRange(8, 10), Rejects(7, 8), InRange(0, 2), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse search for three-byte Hangul character",
+ [next_match_back, next_back, next_match_back, next_match_back, next_match_back],
+ [InRange(34, 37), Rejects(32, 34), InRange(28, 31), InRange(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse search for three-byte Hangul character; check if next_back() still works",
+ [
+ next_match_back,
+ next_back,
+ next_match_back,
+ next_back,
+ next_match_back,
+ next_back,
+ next_match_back
+ ],
+ [
+ InRange(34, 37),
+ Rejects(32, 34),
+ InRange(28, 31),
+ Rejects(25, 28),
+ InRange(19, 22),
+ Rejects(15, 19),
+ Done
+ ]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Reverse search for three-byte Thai character",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(40, 43), Rejects(37, 40), InRange(22, 25), Rejects(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Reverse search for three-byte Thai character; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(40, 43), Rejects(37, 40), InRange(22, 25), Rejects(19, 22), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Reverse search for four-byte emoji",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(43, 47), Rejects(40, 43), InRange(15, 19), Rejects(14, 15), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Reverse search for four-byte emoji; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(43, 47), Rejects(40, 43), InRange(15, 19), Rejects(14, 15), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Reverse search for three-byte Yi character with repeated bytes",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(37, 40), Rejects(34, 37), InRange(10, 13), Rejects(8, 10), Done]
+ );
+
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Reverse search for three-byte Yi character with repeated bytes; check if next_back() still works",
+ [next_match_back, next_back, next_match_back, next_back, next_match_back],
+ [InRange(37, 40), Rejects(34, 37), InRange(10, 13), Rejects(8, 10), Done]
+ );
+}
+
+// Alternating front/back searches must converge to Done instead of the two
+// ends passing each other (regression test for rust-lang/rust#47175).
+#[test]
+fn double_ended_regression_test() {
+ // https://github.com/rust-lang/rust/issues/47175
+ // Ensures that double ended searching comes to a convergence
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "alternating double ended search",
+ [next_match, next_match_back, next_match, next_match_back],
+ [InRange(0, 1), InRange(10, 11), InRange(5, 6), Done]
+ );
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'a',
+ "triple double ended search for a",
+ [next_match, next_match_back, next_match_back, next_match_back],
+ [InRange(0, 1), InRange(10, 11), InRange(5, 6), Done]
+ );
+ search_asserts!(
+ "abcdeabcdeabcde",
+ 'd',
+ "triple double ended search for d",
+ [next_match, next_match_back, next_match_back, next_match_back],
+ [InRange(3, 4), InRange(13, 14), InRange(8, 9), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'Á',
+ "Double ended search for two-byte Latin character",
+ [next_match, next_match_back, next_match, next_match_back],
+ [InRange(0, 2), InRange(32, 34), InRange(8, 10), Done]
+ );
+ search_asserts!(
+ STRESS,
+ '각',
+ "Reverse double ended search for three-byte Hangul character",
+ [next_match_back, next_back, next_match, next, next_match_back, next_match],
+ [InRange(34, 37), Rejects(32, 34), InRange(19, 22), Rejects(22, 25), InRange(28, 31), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'ก',
+ "Double ended search for three-byte Thai character",
+ [next_match, next_back, next, next_match_back, next_match],
+ [InRange(22, 25), Rejects(47, 48), Rejects(25, 28), InRange(40, 43), Done]
+ );
+ search_asserts!(
+ STRESS,
+ '😁',
+ "Double ended search for four-byte emoji",
+ [next_match_back, next, next_match, next_back, next_match],
+ [InRange(43, 47), Rejects(0, 2), InRange(15, 19), Rejects(40, 43), Done]
+ );
+ search_asserts!(
+ STRESS,
+ 'ꁁ',
+ "Double ended search for three-byte Yi character with repeated bytes",
+ [next_match, next, next_match_back, next_back, next_match],
+ [InRange(10, 13), Rejects(13, 14), InRange(37, 40), Rejects(34, 37), Done]
+ );
+}
--- /dev/null
+use core::pin::Pin;
+
+// Pin's shared-reference methods must be usable in const items; the mutable
+// ones are exercised inside a const fn because `&mut` in constants is not
+// (yet) allowed.
+#[test]
+fn pin_const() {
+ // test that the methods of `Pin` are usable in a const context
+
+ const POINTER: &'static usize = &2;
+
+ const PINNED: Pin<&'static usize> = Pin::new(POINTER);
+ const PINNED_UNCHECKED: Pin<&'static usize> = unsafe { Pin::new_unchecked(POINTER) };
+ assert_eq!(PINNED_UNCHECKED, PINNED);
+
+ const INNER: &'static usize = Pin::into_inner(PINNED);
+ assert_eq!(INNER, POINTER);
+
+ const INNER_UNCHECKED: &'static usize = unsafe { Pin::into_inner_unchecked(PINNED) };
+ assert_eq!(INNER_UNCHECKED, POINTER);
+
+ const REF: &'static usize = PINNED.get_ref();
+ assert_eq!(REF, POINTER);
+
+ // Note: `pin_mut_const` tests that the methods of `Pin<&mut T>` are usable in a const context.
+ // A const fn is used because `&mut` is not (yet) usable in constants.
+ const fn pin_mut_const() {
+ let _ = Pin::new(&mut 2).into_ref();
+ let _ = Pin::new(&mut 2).get_mut();
+ let _ = unsafe { Pin::new(&mut 2).get_unchecked_mut() };
+ }
+
+ pin_mut_const();
+}
--- /dev/null
+use core::cell::RefCell;
+use core::ptr::*;
+
+// slice_from_raw_parts works in const context and can shorten a slice.
+#[test]
+fn test_const_from_raw_parts() {
+ const SLICE: &[u8] = &[1, 2, 3, 4];
+ const FROM_RAW: &[u8] = unsafe { &*slice_from_raw_parts(SLICE.as_ptr(), SLICE.len()) };
+ assert_eq!(SLICE, FROM_RAW);
+
+ let slice = &[1, 2, 3, 4, 5];
+ let from_raw = unsafe { &*slice_from_raw_parts(slice.as_ptr(), 2) };
+ assert_eq!(&slice[..2], from_raw);
+}
+
+// Raw-pointer basics: casting *mut Pair to *mut isize aliases the first
+// field, whole-struct writes are visible through both pointers, and
+// ptr::copy moves single elements between Vec buffers at given offsets.
+#[test]
+fn test() {
+ unsafe {
+ struct Pair {
+ fst: isize,
+ snd: isize,
+ };
+ let mut p = Pair { fst: 10, snd: 20 };
+ let pptr: *mut Pair = &mut p;
+ let iptr: *mut isize = pptr as *mut isize;
+ assert_eq!(*iptr, 10);
+ *iptr = 30;
+ assert_eq!(*iptr, 30);
+ assert_eq!(p.fst, 30);
+
+ *pptr = Pair { fst: 50, snd: 60 };
+ assert_eq!(*iptr, 50);
+ assert_eq!(p.fst, 50);
+ assert_eq!(p.snd, 60);
+
+ let v0 = vec![32000u16, 32001u16, 32002u16];
+ let mut v1 = vec![0u16, 0u16, 0u16];
+
+ copy(v0.as_ptr().offset(1), v1.as_mut_ptr().offset(1), 1);
+ assert!((v1[0] == 0u16 && v1[1] == 32001u16 && v1[2] == 0u16));
+ copy(v0.as_ptr().offset(2), v1.as_mut_ptr(), 1);
+ assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 0u16));
+ copy(v0.as_ptr(), v1.as_mut_ptr().offset(2), 1);
+ assert!((v1[0] == 32002u16 && v1[1] == 32001u16 && v1[2] == 32000u16));
+ }
+}
+
+// is_null() for thin pointers, fat slice pointers, and trait-object
+// pointers; only the data-pointer half of a fat pointer decides nullness.
+#[test]
+fn test_is_null() {
+ let p: *const isize = null();
+ assert!(p.is_null());
+
+ let q = p.wrapping_offset(1);
+ assert!(!q.is_null());
+
+ let mp: *mut isize = null_mut();
+ assert!(mp.is_null());
+
+ let mq = mp.wrapping_offset(1);
+ assert!(!mq.is_null());
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let cs: *const [u8] = s;
+ assert!(!cs.is_null());
+
+ let ms: *mut [u8] = s;
+ assert!(!ms.is_null());
+
+ let cz: *const [u8] = &[];
+ assert!(!cz.is_null());
+
+ let mz: *mut [u8] = &mut [];
+ assert!(!mz.is_null());
+
+ let ncs: *const [u8] = null::<[u8; 3]>();
+ assert!(ncs.is_null());
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert!(nms.is_null());
+
+ // Pointers to unsized types -- trait objects
+ let ci: *const dyn ToString = &3;
+ assert!(!ci.is_null());
+
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(!mi.is_null());
+
+ let nci: *const dyn ToString = null::<isize>();
+ assert!(nci.is_null());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.is_null());
+}
+
+// as_ref(): None for null pointers, Some(&T) otherwise, including fat
+// (slice and trait-object) pointers.
+#[test]
+fn test_as_ref() {
+ unsafe {
+ let p: *const isize = null();
+ assert_eq!(p.as_ref(), None);
+
+ let q: *const isize = &2;
+ assert_eq!(q.as_ref().unwrap(), &2);
+
+ let p: *mut isize = null_mut();
+ assert_eq!(p.as_ref(), None);
+
+ let q: *mut isize = &mut 2;
+ assert_eq!(q.as_ref().unwrap(), &2);
+
+ // Lifetime inference
+ let u = 2isize;
+ {
+ let p = &u as *const isize;
+ assert_eq!(p.as_ref().unwrap(), &2);
+ }
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let cs: *const [u8] = s;
+ assert_eq!(cs.as_ref(), Some(&*s));
+
+ let ms: *mut [u8] = s;
+ assert_eq!(ms.as_ref(), Some(&*s));
+
+ let cz: *const [u8] = &[];
+ assert_eq!(cz.as_ref(), Some(&[][..]));
+
+ let mz: *mut [u8] = &mut [];
+ assert_eq!(mz.as_ref(), Some(&[][..]));
+
+ let ncs: *const [u8] = null::<[u8; 3]>();
+ assert_eq!(ncs.as_ref(), None);
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert_eq!(nms.as_ref(), None);
+
+ // Pointers to unsized types -- trait objects
+ let ci: *const dyn ToString = &3;
+ assert!(ci.as_ref().is_some());
+
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(mi.as_ref().is_some());
+
+ let nci: *const dyn ToString = null::<isize>();
+ assert!(nci.as_ref().is_none());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.as_ref().is_none());
+ }
+}
+
+// as_mut(): mutable counterpart of as_ref(), same null / fat-pointer cases.
+#[test]
+fn test_as_mut() {
+ unsafe {
+ let p: *mut isize = null_mut();
+ assert!(p.as_mut() == None);
+
+ let q: *mut isize = &mut 2;
+ assert!(q.as_mut().unwrap() == &mut 2);
+
+ // Lifetime inference
+ let mut u = 2isize;
+ {
+ let p = &mut u as *mut isize;
+ assert!(p.as_mut().unwrap() == &mut 2);
+ }
+
+ // Pointers to unsized types -- slices
+ let s: &mut [u8] = &mut [1, 2, 3];
+ let ms: *mut [u8] = s;
+ assert_eq!(ms.as_mut(), Some(&mut [1, 2, 3][..]));
+
+ let mz: *mut [u8] = &mut [];
+ assert_eq!(mz.as_mut(), Some(&mut [][..]));
+
+ let nms: *mut [u8] = null_mut::<[u8; 3]>();
+ assert_eq!(nms.as_mut(), None);
+
+ // Pointers to unsized types -- trait objects
+ let mi: *mut dyn ToString = &mut 3;
+ assert!(mi.as_mut().is_some());
+
+ let nmi: *mut dyn ToString = null_mut::<isize>();
+ assert!(nmi.as_mut().is_none());
+ }
+}
+
+// Walking a Vec's buffer with offset(1) and comparing against a one-past-
+// the-end pointer, first reading then mutating in place.
+#[test]
+fn test_ptr_addition() {
+ unsafe {
+ let xs = vec![5; 16];
+ let mut ptr = xs.as_ptr();
+ let end = ptr.offset(16);
+
+ while ptr < end {
+ assert_eq!(*ptr, 5);
+ ptr = ptr.offset(1);
+ }
+
+ let mut xs_mut = xs;
+ let mut m_ptr = xs_mut.as_mut_ptr();
+ let m_end = m_ptr.offset(16);
+
+ while m_ptr < m_end {
+ *m_ptr += 5;
+ m_ptr = m_ptr.offset(1);
+ }
+
+ assert!(xs_mut == vec![10; 16]);
+ }
+}
+
+// Walking a buffer backwards with offset(-1), reading by computed index and
+// then doubling each element from the last down to the start pointer.
+#[test]
+fn test_ptr_subtraction() {
+ unsafe {
+ let xs = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let mut idx = 9;
+ let ptr = xs.as_ptr();
+
+ while idx >= 0 {
+ assert_eq!(*(ptr.offset(idx as isize)), idx as isize);
+ idx = idx - 1;
+ }
+
+ let mut xs_mut = xs;
+ let m_start = xs_mut.as_mut_ptr();
+ let mut m_ptr = m_start.offset(9);
+
+ loop {
+ *m_ptr += *m_ptr;
+ if m_ptr == m_start {
+ break;
+ }
+ m_ptr = m_ptr.offset(-1);
+ }
+
+ assert_eq!(xs_mut, [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]);
+ }
+}
+
+// write_bytes fills the whole array with the given byte.
+#[test]
+fn test_set_memory() {
+ let mut xs = [0u8; 20];
+ let ptr = xs.as_mut_ptr();
+ unsafe {
+ write_bytes(ptr, 5u8, xs.len());
+ }
+ assert!(xs == [5u8; 20]);
+}
+
+// NonNull supports unsized pointees: round-trip a fat slice pointer.
+#[test]
+fn test_unsized_nonnull() {
+ let xs: &[i32] = &[1, 2, 3];
+ let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) };
+ let ys = unsafe { ptr.as_ref() };
+ let zs: &[i32] = &[1, 2, 3];
+ assert!(ys == zs);
+}
+
+// Eq/Ord/Hash are implemented for variadic fn pointers; the pointer itself
+// is never called.
+#[test]
+#[allow(warnings)]
+// Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
+// ABI, or even point to actual executable code, because the function itself is never invoked.
+#[no_mangle]
+pub fn test_variadic_fnptr() {
+ use core::hash::{Hash, SipHasher};
+ extern "C" {
+ fn test_variadic_fnptr(_: u64, ...) -> f64;
+ }
+ let p: unsafe extern "C" fn(u64, ...) -> f64 = test_variadic_fnptr;
+ let q = p.clone();
+ assert_eq!(p, q);
+ assert!(!(p < q));
+ let mut s = SipHasher::new();
+ // NOTE(review): hash() returns (), so this assert compares two unit
+ // values; its real effect is just checking both calls compile and run.
+ assert_eq!(p.hash(&mut s), q.hash(&mut s));
+}
+
+// write_unaligned must not drop the value previously stored at the target:
+// only the overwritten-with value (0) is ever dropped, not Dropper(1).
+#[test]
+fn write_unaligned_drop() {
+ thread_local! {
+ static DROPS: RefCell<Vec<u32>> = RefCell::new(Vec::new());
+ }
+
+ struct Dropper(u32);
+
+ impl Drop for Dropper {
+ fn drop(&mut self) {
+ DROPS.with(|d| d.borrow_mut().push(self.0));
+ }
+ }
+
+ {
+ let c = Dropper(0);
+ let mut t = Dropper(1);
+ unsafe {
+ write_unaligned(&mut t, c);
+ }
+ }
+ DROPS.with(|d| assert_eq!(*d.borrow(), [0]));
+}
+
+// align_offset for ZSTs (stride 0): 0 when already aligned, !0 (impossible)
+// otherwise, for every power-of-two alignment below 1024.
+#[test]
+fn align_offset_zst() {
+ // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at
+ // all, because no amount of elements will align the pointer.
+ let mut p = 1;
+ while p < 1024 {
+ assert_eq!((p as *const ()).align_offset(p), 0);
+ if p != 1 {
+ assert_eq!(((p + 1) as *const ()).align_offset(p), !0);
+ }
+ p = (p + 1).next_power_of_two();
+ }
+}
+
+// align_offset for stride-1 pointees equals the byte distance to the next
+// aligned address, checked exhaustively for small powers of two.
+#[test]
+fn align_offset_stride1() {
+ // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to
+ // number of bytes.
+ let mut align = 1;
+ while align < 1024 {
+ for ptr in 1..2 * align {
+ let expected = ptr % align;
+ let offset = if expected == 0 { 0 } else { align - expected };
+ assert_eq!(
+ (ptr as *const u8).align_offset(align),
+ offset,
+ "ptr = {}, align = {}, size = 1",
+ ptr,
+ align
+ );
+ }
+ align = (align + 1).next_power_of_two();
+ }
+}
+
+// Cross-checks align_offset for odd packed strides (3..=10 bytes) against a
+// brute-force search for the first aligned element; any mismatch is
+// reported and fails the final assert.
+#[test]
+fn align_offset_weird_strides() {
+ #[repr(packed)]
+ struct A3(u16, u8);
+ struct A4(u32);
+ #[repr(packed)]
+ struct A5(u32, u8);
+ #[repr(packed)]
+ struct A6(u32, u16);
+ #[repr(packed)]
+ struct A7(u32, u16, u8);
+ #[repr(packed)]
+ struct A8(u32, u32);
+ #[repr(packed)]
+ struct A9(u32, u32, u8);
+ #[repr(packed)]
+ struct A10(u32, u32, u16);
+
+ // Returns true on mismatch so the caller can accumulate failures.
+ unsafe fn test_weird_stride<T>(ptr: *const T, align: usize) -> bool {
+ let numptr = ptr as usize;
+ let mut expected = usize::MAX;
+ // Naive but definitely correct way to find the *first* aligned element of stride::<T>.
+ for el in 0..align {
+ if (numptr + el * ::std::mem::size_of::<T>()) % align == 0 {
+ expected = el;
+ break;
+ }
+ }
+ let got = ptr.align_offset(align);
+ if got != expected {
+ eprintln!(
+ "aligning {:p} (with stride of {}) to {}, expected {}, got {}",
+ ptr,
+ ::std::mem::size_of::<T>(),
+ align,
+ expected,
+ got
+ );
+ return true;
+ }
+ return false;
+ }
+
+ // For pointers of stride != 1, we verify the algorithm against the naivest possible
+ // implementation
+ let mut align = 1;
+ let mut x = false;
+ // Miri is too slow
+ let limit = if cfg!(miri) { 32 } else { 1024 };
+ while align < limit {
+ for ptr in 1usize..4 * align {
+ unsafe {
+ x |= test_weird_stride::<A3>(ptr as *const A3, align);
+ x |= test_weird_stride::<A4>(ptr as *const A4, align);
+ x |= test_weird_stride::<A5>(ptr as *const A5, align);
+ x |= test_weird_stride::<A6>(ptr as *const A6, align);
+ x |= test_weird_stride::<A7>(ptr as *const A7, align);
+ x |= test_weird_stride::<A8>(ptr as *const A8, align);
+ x |= test_weird_stride::<A9>(ptr as *const A9, align);
+ x |= test_weird_stride::<A10>(ptr as *const A10, align);
+ }
+ }
+ align = (align + 1).next_power_of_two();
+ }
+ assert!(!x);
+}
--- /dev/null
+use core::ops::DerefMut;
+use core::option::*;
+
+// Test helper: always succeeds with 666.
+fn op1() -> Result<isize, &'static str> {
+ Ok(666)
+}
+// Test helper: always fails with "sadface".
+fn op2() -> Result<isize, &'static str> {
+ Err("sadface")
+}
+
+// Result::and: Ok is replaced by the argument; the first Err wins.
+#[test]
+fn test_and() {
+ assert_eq!(op1().and(Ok(667)).unwrap(), 667);
+ assert_eq!(op1().and(Err::<i32, &'static str>("bad")).unwrap_err(), "bad");
+
+ assert_eq!(op2().and(Ok(667)).unwrap_err(), "sadface");
+ assert_eq!(op2().and(Err::<i32, &'static str>("bad")).unwrap_err(), "sadface");
+}
+
+// Result::and_then: the closure runs only on Ok; Err short-circuits.
+#[test]
+fn test_and_then() {
+ assert_eq!(op1().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap(), 667);
+ assert_eq!(op1().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(), "bad");
+
+ assert_eq!(op2().and_then(|i| Ok::<isize, &'static str>(i + 1)).unwrap_err(), "sadface");
+ assert_eq!(op2().and_then(|_| Err::<isize, &'static str>("bad")).unwrap_err(), "sadface");
+}
+
+// Result::or: Ok wins over the argument; Err falls back to it.
+#[test]
+fn test_or() {
+ assert_eq!(op1().or(Ok::<_, &'static str>(667)).unwrap(), 666);
+ assert_eq!(op1().or(Err("bad")).unwrap(), 666);
+
+ assert_eq!(op2().or(Ok::<_, &'static str>(667)).unwrap(), 667);
+ assert_eq!(op2().or(Err("bad")).unwrap_err(), "bad");
+}
+
+// Result::or_else: the fallback closure runs only on Err.
+#[test]
+fn test_or_else() {
+ assert_eq!(op1().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 666);
+ assert_eq!(op1().or_else(|e| Err::<isize, &'static str>(e)).unwrap(), 666);
+
+ assert_eq!(op2().or_else(|_| Ok::<isize, &'static str>(667)).unwrap(), 667);
+ assert_eq!(op2().or_else(|e| Err::<isize, &'static str>(e)).unwrap_err(), "sadface");
+}
+
+// Result::map transforms only the Ok value.
+#[test]
+fn test_impl_map() {
+ assert!(Ok::<isize, isize>(1).map(|x| x + 1) == Ok(2));
+ assert!(Err::<isize, isize>(1).map(|x| x + 1) == Err(1));
+}
+
+// Result::map_err transforms only the Err value.
+#[test]
+fn test_impl_map_err() {
+ assert!(Ok::<isize, isize>(1).map_err(|x| x + 1) == Ok(1));
+ assert!(Err::<isize, isize>(1).map_err(|x| x + 1) == Err(2));
+}
+
+// collect() into Result<Vec<_>, _> short-circuits on the first Err.
+// NOTE: uses the unstable `box` expression syntax (nightly-only test crate).
+#[test]
+fn test_collect() {
+ let v: Result<Vec<isize>, ()> = (0..0).map(|_| Ok::<isize, ()>(0)).collect();
+ assert!(v == Ok(vec![]));
+
+ let v: Result<Vec<isize>, ()> = (0..3).map(|x| Ok::<isize, ()>(x)).collect();
+ assert!(v == Ok(vec![0, 1, 2]));
+
+ let v: Result<Vec<isize>, isize> = (0..3).map(|x| if x > 1 { Err(x) } else { Ok(x) }).collect();
+ assert!(v == Err(2));
+
+ // test that it does not take more elements than it needs
+ // (the third closure would panic if evaluated)
+ let mut functions: [Box<dyn Fn() -> Result<(), isize>>; 3] =
+ [box || Ok(()), box || Err(1), box || panic!()];
+
+ let v: Result<Vec<()>, isize> = functions.iter_mut().map(|f| (*f)()).collect();
+ assert!(v == Err(1));
+}
+
#[test]
fn test_fmt_default() {
    // The derived `Debug` impl wraps the payload's own `Debug` output
    // in the variant name.
    let ok: Result<isize, &'static str> = Ok(100);
    assert_eq!(format!("{:?}", ok), "Ok(100)");

    let err: Result<isize, &'static str> = Err("Err");
    assert_eq!(format!("{:?}", err), "Err(\"Err\")");
}
+
#[test]
fn test_unwrap_or() {
    // `unwrap_or` yields the payload for `Ok` and the fallback for `Err`.
    assert_eq!(Ok::<isize, &'static str>(100).unwrap_or(50), 100);
    assert_eq!(Err::<isize, &'static str>("Err").unwrap_or(50), 50);
}
+
#[test]
fn test_unwrap_or_else() {
    // The handler runs only on `Err`; a recognized message maps to 50,
    // anything else would panic (exercised by the companion panic test).
    fn handler(msg: &'static str) -> isize {
        match msg {
            "I got this." => 50,
            _ => panic!("BadBad"),
        }
    }

    assert_eq!(Ok::<isize, &'static str>(100).unwrap_or_else(handler), 100);
    assert_eq!(Err::<isize, &'static str>("I got this.").unwrap_or_else(handler), 50);
}
+
#[test]
#[should_panic]
pub fn test_unwrap_or_else_panic() {
    // The handler only recovers from "I got this."; any other message hits
    // the `panic!("BadBad")` arm, which is the panic this test expects.
    fn handler(msg: &'static str) -> isize {
        if msg == "I got this." { 50 } else { panic!("BadBad") }
    }

    let bad_err: Result<isize, &'static str> = Err("Unrecoverable mess.");
    let _: isize = bad_err.unwrap_or_else(handler);
}
+
#[test]
pub fn test_expect_ok() {
    // `expect` on `Ok` returns the payload and never uses the message.
    let ok: Result<isize, &'static str> = Ok(100);
    assert_eq!(ok.expect("Unexpected error"), 100);
}
#[test]
#[should_panic(expected = "Got expected error: \"All good\"")]
pub fn test_expect_err() {
    // `expect` on `Err` panics with the message followed by the `Debug`
    // rendering of the error value; `should_panic(expected)` matches it.
    let err: Result<isize, &'static str> = Err("All good");
    err.expect("Got expected error");
}
+
#[test]
pub fn test_expect_err_err() {
    // `expect_err` is the mirror of `expect`: `Err` returns its payload.
    let ok: Result<&'static str, isize> = Err(100);
    assert_eq!(ok.expect_err("Unexpected ok"), 100);
}
#[test]
#[should_panic(expected = "Got expected ok: \"All good\"")]
pub fn test_expect_err_ok() {
    // `expect_err` on `Ok` panics with the message plus the `Debug` form
    // of the success value.
    let err: Result<&'static str, isize> = Ok("All good");
    err.expect_err("Got expected ok");
}
+
#[test]
pub fn test_iter() {
    // An `Ok` value iterates exactly once, yielding a shared reference.
    let ok: Result<isize, &'static str> = Ok(100);
    let mut it = ok.iter();
    assert_eq!(it.size_hint(), (1, Some(1)));
    assert_eq!(it.next(), Some(&100));
    // After the single element is consumed the iterator is exhausted.
    assert_eq!(it.size_hint(), (0, Some(0)));
    assert_eq!(it.next(), None);
    // `&Result` implements `IntoIterator` with the same behavior.
    assert_eq!((&ok).into_iter().next(), Some(&100));

    // An `Err` value yields nothing at all.
    let err: Result<isize, &'static str> = Err("error");
    assert_eq!(err.iter().next(), None);
}
+
#[test]
pub fn test_iter_mut() {
    // Writing through `iter_mut` rewrites the `Ok` payload in place.
    let mut ok: Result<isize, &'static str> = Ok(100);
    ok.iter_mut().for_each(|loc| *loc = 200);
    assert_eq!(ok, Ok(200));
    // `&mut Result` implements `IntoIterator` with the same semantics.
    (&mut ok).into_iter().for_each(|loc| *loc = 300);
    assert_eq!(ok, Ok(300));

    // An `Err` value exposes no element, so nothing is written.
    let mut err: Result<isize, &'static str> = Err("error");
    err.iter_mut().for_each(|loc| *loc = 200);
    assert_eq!(err, Err("error"));
}
+
#[test]
pub fn test_unwrap_or_default() {
    // `Ok` keeps its payload; `Err` falls back to `isize::default()`, i.e. 0.
    assert_eq!(op1().unwrap_or_default(), 666);
    assert_eq!(op2().unwrap_or_default(), 0);
}
+
#[test]
pub fn test_into_ok() {
    // With an uninhabited error type, the `Err` arm is statically impossible,
    // so `into_ok` extracts the payload with no panic path.
    fn infallible_op() -> Result<isize, !> {
        Ok(666)
    }

    assert_eq!(infallible_op().into_ok(), 666);

    // The same works for a user-defined empty type that converts into `!`.
    enum MyNeverToken {}
    impl From<MyNeverToken> for ! {
        fn from(never: MyNeverToken) -> ! {
            // An empty match on an uninhabited value type-checks as `!`.
            match never {}
        }
    }

    fn infallible_op2() -> Result<isize, MyNeverToken> {
        Ok(667)
    }

    assert_eq!(infallible_op2().into_ok(), 667);
}
+
#[test]
fn test_try() {
    // `?` in an `Option`-returning function unwraps `Some` and early-returns
    // on `None`. (The original used `Ok(1)?` / `Err(NoneError)?` via the old
    // `Result`-in-`Option` interop; `NoneError` was removed with the
    // `try_trait_v2` redesign, so these cases now use `Option` values directly.)
    fn try_option_some() -> Option<u8> {
        let val = Some(1)?;
        Some(val)
    }
    assert_eq!(try_option_some(), Some(1));

    fn try_option_none() -> Option<u8> {
        let val = None::<u8>?;
        Some(val)
    }
    assert_eq!(try_option_none(), None);

    // `?` in a `Result`-returning function unwraps `Ok` and early-returns on `Err`.
    fn try_result_ok() -> Result<u8, u8> {
        let result: Result<u8, u8> = Ok(1);
        let val = result?;
        Ok(val)
    }
    assert_eq!(try_result_ok(), Ok(1));

    fn try_result_err() -> Result<u8, u8> {
        let result: Result<u8, u8> = Err(1);
        let val = result?;
        Ok(val)
    }
    assert_eq!(try_result_err(), Err(1));
}
+
#[test]
fn test_result_as_deref() {
    // `as_deref` borrows the result and derefs the `Ok` payload:
    // &Result<T: Deref, E>::Ok(T).as_deref() ->
    // Result<&T::Deref::Target, &E>::Ok(&*T)
    let ref_ok = &Result::Ok::<&i32, u8>(&42);
    let expected_result = Result::Ok::<&i32, &u8>(&42);
    assert_eq!(ref_ok.as_deref(), expected_result);

    // `String` derefs to `&str`.
    let ref_ok = &Result::Ok::<String, u32>(String::from("a result"));
    let expected_result = Result::Ok::<&str, &u32>("a result");
    assert_eq!(ref_ok.as_deref(), expected_result);

    // `Vec<T>` derefs to `&[T]`.
    let ref_ok = &Result::Ok::<Vec<i32>, u32>(vec![1, 2, 3, 4, 5]);
    let expected_result = Result::Ok::<&[i32], &u32>([1, 2, 3, 4, 5].as_slice());
    assert_eq!(ref_ok.as_deref(), expected_result);

    // The `Err` payload is only borrowed, never dereferenced:
    // &Result<T: Deref, E>::Err(T).as_deref() ->
    // Result<&T::Deref::Target, &E>::Err(&*E)
    let val = 41;
    let ref_err = &Result::Err::<&u8, i32>(val);
    let expected_result = Result::Err::<&u8, &i32>(&val);
    assert_eq!(ref_err.as_deref(), expected_result);

    // Note: the error side stays `&String` / `&Vec<i32>`, not `&str` / `&[i32]`.
    let s = String::from("an error");
    let ref_err = &Result::Err::<&u32, String>(s.clone());
    let expected_result = Result::Err::<&u32, &String>(&s);
    assert_eq!(ref_err.as_deref(), expected_result);

    let v = vec![5, 4, 3, 2, 1];
    let ref_err = &Result::Err::<&u32, Vec<i32>>(v.clone());
    let expected_result = Result::Err::<&u32, &Vec<i32>>(&v);
    assert_eq!(ref_err.as_deref(), expected_result);
}
+
#[test]
fn test_result_as_deref_mut() {
    // `as_deref_mut` mutably borrows the result and derefs the `Ok` payload:
    // &mut Result<T: DerefMut, E>::Ok(T).as_deref_mut() ->
    // Result<&mut T::DerefMut::Target, &mut E>::Ok(&mut *T)
    let mut val = 42;
    let mut expected_val = 42;
    let mut_ok = &mut Result::Ok::<&mut i32, u8>(&mut val);
    let expected_result = Result::Ok::<&mut i32, &mut u8>(&mut expected_val);
    assert_eq!(mut_ok.as_deref_mut(), expected_result);

    // `String` derefs mutably to `&mut str`.
    let mut expected_string = String::from("a result");
    let mut_ok = &mut Result::Ok::<String, u32>(expected_string.clone());
    let expected_result = Result::Ok::<&mut str, &mut u32>(expected_string.deref_mut());
    assert_eq!(mut_ok.as_deref_mut(), expected_result);

    // `Vec<T>` derefs mutably to `&mut [T]`.
    let mut expected_vec = vec![1, 2, 3, 4, 5];
    let mut_ok = &mut Result::Ok::<Vec<i32>, u32>(expected_vec.clone());
    let expected_result = Result::Ok::<&mut [i32], &mut u32>(expected_vec.as_mut_slice());
    assert_eq!(mut_ok.as_deref_mut(), expected_result);

    // The `Err` payload is only mutably borrowed, never dereferenced:
    // &mut Result<T: DerefMut, E>::Err(T).as_deref_mut() ->
    // Result<&mut T, &mut E>::Err(&mut *E)
    let mut val = 41;
    let mut_err = &mut Result::Err::<&mut u8, i32>(val);
    let expected_result = Result::Err::<&mut u8, &mut i32>(&mut val);
    assert_eq!(mut_err.as_deref_mut(), expected_result);

    // Note: the error side stays `&mut String` / `&mut Vec<i32>`.
    let mut expected_string = String::from("an error");
    let mut_err = &mut Result::Err::<&mut u32, String>(expected_string.clone());
    let expected_result = Result::Err::<&mut u32, &mut String>(&mut expected_string);
    assert_eq!(mut_err.as_deref_mut(), expected_result);

    let mut expected_vec = vec![5, 4, 3, 2, 1];
    let mut_err = &mut Result::Err::<&mut u32, Vec<i32>>(expected_vec.clone());
    let expected_result = Result::Err::<&mut u32, &mut Vec<i32>>(&mut expected_vec);
    assert_eq!(mut_err.as_deref_mut(), expected_result);
}
+
#[test]
fn result_const() {
    // test that the methods of `Result` are usable in a const context

    const RESULT: Result<usize, bool> = Ok(32);

    // Each `const` item forces compile-time evaluation; a non-const method
    // here would be a build error, not a runtime failure.
    const REF: Result<&usize, &bool> = RESULT.as_ref();
    assert_eq!(REF, Ok(&32));

    const IS_OK: bool = RESULT.is_ok();
    assert!(IS_OK);

    const IS_ERR: bool = RESULT.is_err();
    assert!(!IS_ERR)
}
--- /dev/null
+use core::cell::Cell;
+use core::result::Result::{Err, Ok};
+
#[test]
fn test_position() {
    // `position` scans front-to-back and reports the first matching index.
    let b = [1, 2, 3, 5, 5];
    for &(needle, expected) in &[(9, None), (5, Some(3)), (3, Some(2)), (0, None)] {
        assert_eq!(b.iter().position(|&v| v == needle), expected);
    }
}
+
#[test]
fn test_rposition() {
    // `rposition` scans back-to-front and reports the last matching index.
    let b = [1, 2, 3, 5, 5];
    for &(needle, expected) in &[(9, None), (5, Some(4)), (3, Some(2)), (0, None)] {
        assert_eq!(b.iter().rposition(|&v| v == needle), expected);
    }
}
+
#[test]
fn test_binary_search() {
    // `Ok(i)` is a match at index `i`; `Err(i)` is the sorted insertion point.

    // An empty slice can only report insertion point 0.
    let b: [i32; 0] = [];
    assert_eq!(b.binary_search(&5), Err(0));

    let b = [4];
    assert_eq!(b.binary_search(&3), Err(0));
    assert_eq!(b.binary_search(&4), Ok(0));
    assert_eq!(b.binary_search(&5), Err(1));

    let b = [1, 2, 4, 6, 8, 9];
    assert_eq!(b.binary_search(&5), Err(3));
    assert_eq!(b.binary_search(&6), Ok(3));
    assert_eq!(b.binary_search(&7), Err(4));
    assert_eq!(b.binary_search(&8), Ok(4));

    let b = [1, 2, 4, 5, 6, 8];
    assert_eq!(b.binary_search(&9), Err(6));

    let b = [1, 2, 4, 6, 7, 8, 9];
    assert_eq!(b.binary_search(&6), Ok(3));
    assert_eq!(b.binary_search(&5), Err(3));
    assert_eq!(b.binary_search(&8), Ok(5));

    let b = [1, 2, 4, 5, 6, 8, 9];
    assert_eq!(b.binary_search(&7), Err(5));
    assert_eq!(b.binary_search(&0), Err(0));

    let b = [1, 3, 3, 3, 7];
    assert_eq!(b.binary_search(&0), Err(0));
    assert_eq!(b.binary_search(&1), Ok(0));
    assert_eq!(b.binary_search(&2), Err(1));
    // With duplicates, any of the matching indices is a valid answer.
    // (`matches!` replaces the verbose bool-returning `match`; the original
    // also repeated this assertion twice by accident.)
    assert!(matches!(b.binary_search(&3), Ok(1..=3)));
    assert_eq!(b.binary_search(&4), Err(4));
    assert_eq!(b.binary_search(&5), Err(4));
    assert_eq!(b.binary_search(&6), Err(4));
    assert_eq!(b.binary_search(&7), Ok(4));
    assert_eq!(b.binary_search(&8), Err(5));
}
+
#[test]
// Test implementation specific behavior when finding equivalent elements.
// It is ok to break this test but when you do a crater run is highly advisable.
fn test_binary_search_implementation_details() {
    // These assertions pin WHICH of several equal elements the current
    // algorithm lands on — a stricter contract than `binary_search`'s
    // documented "any matching index" guarantee.
    let b = [1, 1, 2, 2, 3, 3, 3];
    assert_eq!(b.binary_search(&1), Ok(1));
    assert_eq!(b.binary_search(&2), Ok(3));
    assert_eq!(b.binary_search(&3), Ok(6));
    let b = [1, 1, 1, 1, 1, 3, 3, 3, 3];
    assert_eq!(b.binary_search(&1), Ok(4));
    assert_eq!(b.binary_search(&3), Ok(8));
    let b = [1, 1, 1, 1, 3, 3, 3, 3, 3];
    assert_eq!(b.binary_search(&1), Ok(3));
    assert_eq!(b.binary_search(&3), Ok(8));
}
+
#[test]
fn test_partition_point() {
    // `partition_point(|x| x < t)` counts the leading elements below `t`,
    // i.e. it returns the insertion index that keeps the slice sorted.
    let pp = |b: &[i32], t: i32| b.partition_point(|&x| x < t);

    assert_eq!(pp(&[], 5), 0);

    assert_eq!(pp(&[4], 3), 0);
    assert_eq!(pp(&[4], 4), 0);
    assert_eq!(pp(&[4], 5), 1);

    let b = [1, 2, 4, 6, 8, 9];
    assert_eq!(pp(&b, 5), 3);
    assert_eq!(pp(&b, 6), 3);
    assert_eq!(pp(&b, 7), 4);
    assert_eq!(pp(&b, 8), 4);

    assert_eq!(pp(&[1, 2, 4, 5, 6, 8], 9), 6);

    let b = [1, 2, 4, 6, 7, 8, 9];
    assert_eq!(pp(&b, 6), 3);
    assert_eq!(pp(&b, 5), 3);
    assert_eq!(pp(&b, 8), 5);

    let b = [1, 2, 4, 5, 6, 8, 9];
    assert_eq!(pp(&b, 7), 5);
    assert_eq!(pp(&b, 0), 0);

    // Duplicates: the partition point sits before the first equal element.
    let b = [1, 3, 3, 3, 7];
    for &(t, idx) in &[(0, 0), (1, 0), (2, 1), (3, 1), (4, 4), (5, 4), (6, 4), (7, 4), (8, 5)] {
        assert_eq!(pp(&b, t), idx);
    }
}
+
#[test]
fn test_iterator_nth() {
    // `nth(i)` on a fresh iterator is equivalent to indexing.
    let v: &[_] = &[0, 1, 2, 3, 4];
    for (i, elem) in v.iter().enumerate() {
        assert_eq!(v.iter().nth(i), Some(elem));
    }
    assert_eq!(v.iter().nth(v.len()), None);

    // `nth` consumes the skipped elements, so successive calls advance.
    let mut iter = v.iter();
    assert_eq!(iter.nth(2), Some(&v[2]));
    assert_eq!(iter.nth(1), Some(&v[4]));
}
+
#[test]
fn test_iterator_nth_back() {
    // `nth_back(i)` counts from the tail of the slice.
    let v: &[_] = &[0, 1, 2, 3, 4];
    for i in 0..v.len() {
        assert_eq!(v.iter().nth_back(i), Some(&v[v.len() - 1 - i]));
    }
    assert_eq!(v.iter().nth_back(v.len()), None);

    // Successive calls keep consuming from the back.
    let mut iter = v.iter();
    assert_eq!(iter.nth_back(2), Some(&v[2]));
    assert_eq!(iter.nth_back(1), Some(&v[0]));
}
+
#[test]
fn test_iterator_last() {
    // `last` drains the iterator and returns its final element.
    let v: &[_] = &[0, 1, 2, 3, 4];
    assert_eq!(v.iter().last(), Some(&4));
    // A one-element subslice returns that single element.
    assert_eq!(v[..1].iter().last(), Some(&0));
}
+
#[test]
fn test_iterator_count() {
    let v: &[_] = &[0, 1, 2, 3, 4];
    assert_eq!(v.iter().count(), 5);

    // `count` reflects elements already consumed from the iterator.
    let mut partial = v.iter();
    partial.nth(1); // consumes the first two elements
    assert_eq!(partial.count(), 3);
}
+
+#[test]
+fn test_chunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.chunks(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks(10);
+ assert_eq!(c3.nth_back(0).unwrap(), &[0, 1, 2, 3, 4]);
+ assert_eq!(c3.next(), None);
+
+ let v4: &[i32] = &[0, 1, 2];
+ let mut c4 = v4.chunks(10);
+ assert_eq!(c4.nth_back(1_000_000_000usize), None);
+}
+
+#[test]
+fn test_chunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks(2);
+ assert_eq!(c.last().unwrap()[1], 5);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks(2);
+ assert_eq!(c2.last().unwrap()[0], 4);
+}
+
+#[test]
+fn test_chunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .chunks(2)
+ .zip(v2.chunks(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22, 14]);
+}
+
+#[test]
+fn test_chunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_mut(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.chunks_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c1 = v1.chunks_mut(3);
+ assert_eq!(c1.nth_back(1).unwrap(), &[0, 1, 2]);
+ assert_eq!(c1.next(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_mut(10);
+ assert_eq!(c3.nth_back(0).unwrap(), &[0, 1, 2, 3, 4]);
+ assert_eq!(c3.next(), None);
+
+ let v4: &mut [i32] = &mut [0, 1, 2];
+ let mut c4 = v4.chunks_mut(10);
+ assert_eq!(c4.nth_back(1_000_000_000usize), None);
+}
+
+#[test]
+fn test_chunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_mut(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_mut(2);
+ assert_eq!(c2.last().unwrap(), &[4]);
+}
+
+#[test]
+fn test_chunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.chunks_mut(2).zip(v2.chunks(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 14]);
+}
+
+#[test]
+fn test_chunks_exact_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.chunks_exact(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_exact_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.chunks_exact(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_exact_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_exact(3);
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_exact(10);
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_chunks_exact_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact(2);
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_chunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.chunks_exact(2);
+ assert_eq!(c.remainder(), &[4]);
+}
+
+#[test]
+fn test_chunks_exact_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .chunks_exact(2)
+ .zip(v2.chunks_exact(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22]);
+}
+
+#[test]
+fn test_chunks_exact_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact_mut(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.chunks_exact_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_chunks_exact_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.chunks_exact_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_chunks_exact_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.chunks_exact_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.chunks_exact_mut(3);
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.chunks_exact_mut(10);
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_chunks_exact_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.chunks_exact_mut(2);
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.chunks_exact_mut(2);
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_chunks_exact_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c = v.chunks_exact_mut(2);
+ assert_eq!(c.into_remainder(), &[4]);
+}
+
+#[test]
+fn test_chunks_exact_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.chunks_exact_mut(2).zip(v2.chunks_exact(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 4]);
+}
+
+#[test]
+fn test_array_chunks_infer() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, -4];
+ let c = v.array_chunks();
+ for &[a, b, c] in c {
+ assert_eq!(a + b + c, 3);
+ }
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let total = v2.array_chunks().map(|&[a, b]| a * b).sum::<i32>();
+ assert_eq!(total, 2 * 3 + 4 * 5);
+}
+
+#[test]
+fn test_array_chunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks::<3>();
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks::<2>();
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.array_chunks::<2>();
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_array_chunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks::<2>();
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.array_chunks::<3>();
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_array_chunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks::<2>();
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.array_chunks::<3>();
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c3 = v3.array_chunks::<10>();
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_array_chunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks::<2>();
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks::<2>();
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_array_chunks_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.array_chunks::<2>();
+ assert_eq!(c.remainder(), &[4]);
+}
+
+#[test]
+fn test_array_chunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .array_chunks::<2>()
+ .zip(v2.array_chunks::<2>())
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![14, 22]);
+}
+
+#[test]
+fn test_array_chunks_mut_infer() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ for a in v.array_chunks_mut() {
+ let sum = a.iter().sum::<i32>();
+ *a = [sum; 3];
+ }
+ assert_eq!(v, &[3, 3, 3, 12, 12, 12, 6]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ v2.array_chunks_mut().for_each(|[a, b]| core::mem::swap(a, b));
+ assert_eq!(v2, &[1, 0, 3, 2, 5, 4, 6]);
+}
+
+#[test]
+fn test_array_chunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks_mut::<3>();
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks_mut::<2>();
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.array_chunks_mut::<2>();
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_array_chunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks_mut::<2>();
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.array_chunks_mut::<3>();
+ assert_eq!(c2.nth(1).unwrap(), &[3, 4, 5]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_array_chunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.array_chunks_mut::<2>();
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+ assert_eq!(c.next(), None);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.array_chunks_mut::<3>();
+ assert_eq!(c2.nth_back(0).unwrap(), &[0, 1, 2]);
+ assert_eq!(c2.next(), None);
+ assert_eq!(c2.next_back(), None);
+
+ let v3: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c3 = v3.array_chunks_mut::<10>();
+ assert_eq!(c3.nth_back(0), None);
+}
+
+#[test]
+fn test_array_chunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.array_chunks_mut::<2>();
+ assert_eq!(c.last().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.array_chunks_mut::<2>();
+ assert_eq!(c2.last().unwrap(), &[2, 3]);
+}
+
+#[test]
+fn test_array_chunks_mut_remainder() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c = v.array_chunks_mut::<2>();
+ assert_eq!(c.into_remainder(), &[4]);
+}
+
+#[test]
+fn test_array_chunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.array_chunks_mut::<2>().zip(v2.array_chunks::<2>()) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [13, 14, 19, 20, 4]);
+}
+
+#[test]
+fn test_array_windows_infer() {
+ let v: &[i32] = &[0, 1, 0, 1];
+ assert_eq!(v.array_windows::<2>().count(), 3);
+ let c = v.array_windows();
+ for &[a, b] in c {
+ assert_eq!(a + b, 1);
+ }
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let total = v2.array_windows().map(|&[a, b, c]| a + b + c).sum::<i32>();
+ assert_eq!(total, 3 + 6 + 9 + 12 + 15);
+}
+
+#[test]
+fn test_array_windows_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.array_windows::<3>();
+ assert_eq!(c.count(), 4);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.array_windows::<6>();
+ assert_eq!(c2.count(), 0);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.array_windows::<2>();
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_array_windows_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let snd = v.array_windows::<4>().nth(1);
+ assert_eq!(snd, Some(&[1, 2, 3, 4]));
+ let mut arr_windows = v.array_windows::<2>();
+ assert_ne!(arr_windows.nth(0), arr_windows.nth(0));
+ let last = v.array_windows::<3>().last();
+ assert_eq!(last, Some(&[3, 4, 5]));
+}
+
+#[test]
+fn test_array_windows_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let snd = v.array_windows::<4>().nth_back(1);
+ assert_eq!(snd, Some(&[1, 2, 3, 4]));
+ let mut arr_windows = v.array_windows::<2>();
+ assert_ne!(arr_windows.nth_back(0), arr_windows.nth_back(0));
+}
+
+#[test]
+fn test_rchunks_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.rchunks(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks(3);
+ assert_eq!(c2.nth(1).unwrap(), &[0, 1]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[2, 3, 4]);
+ assert_eq!(c2.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks(2);
+ assert_eq!(c.last().unwrap()[1], 1);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks(2);
+ assert_eq!(c2.last().unwrap()[0], 0);
+}
+
+#[test]
+fn test_rchunks_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .rchunks(2)
+ .zip(v2.rchunks(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![26, 18, 6]);
+}
+
+#[test]
+fn test_rchunks_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_mut(2);
+ assert_eq!(c2.count(), 3);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.rchunks_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_mut_nth() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks_mut(3);
+ assert_eq!(c2.nth(1).unwrap(), &[0, 1]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_mut_nth_back() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_mut(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let mut c2 = v2.rchunks_mut(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[2, 3, 4]);
+ assert_eq!(c2.next_back(), None);
+}
+
+#[test]
+fn test_rchunks_mut_last() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_mut(2);
+ assert_eq!(c.last().unwrap(), &[0, 1]);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_mut(2);
+ assert_eq!(c2.last().unwrap(), &[0]);
+}
+
+#[test]
+fn test_rchunks_mut_zip() {
+ let v1: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ for (a, b) in v1.rchunks_mut(2).zip(v2.rchunks(2)) {
+ let sum = b.iter().sum::<i32>();
+ for v in a {
+ *v += sum;
+ }
+ }
+ assert_eq!(v1, [6, 16, 17, 22, 23]);
+}
+
+#[test]
+fn test_rchunks_exact_count() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &[i32] = &[];
+ let c3 = v3.rchunks_exact(2);
+ assert_eq!(c3.count(), 0);
+}
+
+#[test]
+fn test_rchunks_exact_nth() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact(2);
+ assert_eq!(c.nth(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact(3);
+ assert_eq!(c2.nth(1).unwrap(), &[1, 2, 3]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_nth_back() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let mut c = v.rchunks_exact(2);
+ assert_eq!(c.nth_back(1).unwrap(), &[2, 3]);
+ assert_eq!(c.next_back().unwrap(), &[4, 5]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4, 5, 6];
+ let mut c2 = v2.rchunks_exact(3);
+ assert_eq!(c2.nth_back(1).unwrap(), &[4, 5, 6]);
+ assert_eq!(c2.next(), None);
+}
+
+#[test]
+fn test_rchunks_exact_last() {
+ let v: &[i32] = &[0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact(2);
+ assert_eq!(c.last().unwrap(), &[0, 1]);
+
+ let v2: &[i32] = &[0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact(2);
+ assert_eq!(c2.last().unwrap(), &[1, 2]);
+}
+
+#[test]
+fn test_rchunks_exact_remainder() {
+ let v: &[i32] = &[0, 1, 2, 3, 4];
+ let c = v.rchunks_exact(2);
+ assert_eq!(c.remainder(), &[0]);
+}
+
+#[test]
+fn test_rchunks_exact_zip() {
+ let v1: &[i32] = &[0, 1, 2, 3, 4];
+ let v2: &[i32] = &[6, 7, 8, 9, 10];
+
+ let res = v1
+ .rchunks_exact(2)
+ .zip(v2.rchunks_exact(2))
+ .map(|(a, b)| a.iter().sum::<i32>() + b.iter().sum::<i32>())
+ .collect::<Vec<_>>();
+ assert_eq!(res, vec![26, 18]);
+}
+
+#[test]
+fn test_rchunks_exact_mut_count() {
+ let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
+ let c = v.rchunks_exact_mut(3);
+ assert_eq!(c.count(), 2);
+
+ let v2: &mut [i32] = &mut [0, 1, 2, 3, 4];
+ let c2 = v2.rchunks_exact_mut(2);
+ assert_eq!(c2.count(), 2);
+
+ let v3: &mut [i32] = &mut [];
+ let c3 = v3.rchunks_exact_mut(2);
+ assert_eq!(c3.count(), 0);
+}
+
#[test]
fn test_rchunks_exact_mut_nth() {
    // `nth(n)` skips `n` chunks, counting from the back of the slice.
    let evens: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
    let mut it = evens.rchunks_exact_mut(2);
    assert_eq!(it.nth(1).unwrap(), &[2, 3]);
    assert_eq!(it.next().unwrap(), &[0, 1]);

    // With remainder [0] only two full chunks exist; `nth(1)` consumes both.
    let odds: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
    let mut it = odds.rchunks_exact_mut(3);
    assert_eq!(it.nth(1).unwrap(), &[1, 2, 3]);
    assert_eq!(it.next(), None);
}
+
#[test]
fn test_rchunks_exact_mut_nth_back() {
    // `nth_back` counts from the front of the slice (the back of the iterator).
    let evens: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
    let mut it = evens.rchunks_exact_mut(2);
    assert_eq!(it.nth_back(1).unwrap(), &[2, 3]);
    assert_eq!(it.next_back().unwrap(), &[4, 5]);

    let odds: &mut [i32] = &mut [0, 1, 2, 3, 4, 5, 6];
    let mut it = odds.rchunks_exact_mut(3);
    assert_eq!(it.nth_back(1).unwrap(), &[4, 5, 6]);
    assert_eq!(it.next(), None);
}
+
#[test]
fn test_rchunks_exact_mut_last() {
    // Chunks are yielded back-to-front, so `last()` is the frontmost chunk.
    let even: &mut [i32] = &mut [0, 1, 2, 3, 4, 5];
    assert_eq!(even.rchunks_exact_mut(2).last().unwrap(), &[0, 1]);

    // The leading remainder element of an odd-length slice is skipped.
    let odd: &mut [i32] = &mut [0, 1, 2, 3, 4];
    assert_eq!(odd.rchunks_exact_mut(2).last().unwrap(), &[1, 2]);
}
+
#[test]
fn test_rchunks_exact_mut_remainder() {
    // The mutable variant gives back its leftover front elements by value,
    // via `into_remainder()` (consuming the iterator).
    let v: &mut [i32] = &mut [0, 1, 2, 3, 4];
    assert_eq!(v.rchunks_exact_mut(2).into_remainder(), &[0]);
}
+
#[test]
fn test_rchunks_exact_mut_zip() {
    let dst: &mut [i32] = &mut [0, 1, 2, 3, 4];
    let src: &[i32] = &[6, 7, 8, 9, 10];

    // Add each source chunk's sum into the matching destination chunk.
    // Chunks pair back-to-front; dst[0] is the remainder and stays untouched.
    for (chunk, with) in dst.rchunks_exact_mut(2).zip(src.rchunks_exact(2)) {
        let add: i32 = with.iter().sum();
        chunk.iter_mut().for_each(|x| *x += add);
    }
    assert_eq!(dst, [0, 16, 17, 22, 23]);
}
+
#[test]
fn test_windows_count() {
    // A slice of length 6 has 6 - 3 + 1 = 4 windows of width 3.
    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
    assert_eq!(v.windows(3).count(), 4);

    // A window wider than the slice yields nothing.
    let short: &[i32] = &[0, 1, 2, 3, 4];
    assert_eq!(short.windows(6).count(), 0);

    // And neither does an empty slice.
    let empty: &[i32] = &[];
    assert_eq!(empty.windows(2).count(), 0);
}
+
#[test]
fn test_windows_nth() {
    // Windows overlap: after nth(2) yields [2, 3], the next starts at 3.
    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
    let mut windows = v.windows(2);
    assert_eq!(windows.nth(2).unwrap()[1], 3);
    assert_eq!(windows.next().unwrap()[0], 3);

    // A 5-element slice has just two width-4 windows; nth(1) takes the last.
    let v2: &[i32] = &[0, 1, 2, 3, 4];
    let mut windows = v2.windows(4);
    assert_eq!(windows.nth(1).unwrap()[1], 2);
    assert_eq!(windows.next(), None);
}
+
#[test]
fn test_windows_nth_back() {
    // nth_back(2) skips the two rearmost windows and yields [2, 3].
    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
    let mut windows = v.windows(2);
    assert_eq!(windows.nth_back(2).unwrap()[0], 2);
    assert_eq!(windows.next_back().unwrap()[1], 2);

    // Only two width-4 windows exist; nth_back(1) consumes the front one.
    let v2: &[i32] = &[0, 1, 2, 3, 4];
    let mut windows = v2.windows(4);
    assert_eq!(windows.nth_back(1).unwrap()[1], 1);
    assert_eq!(windows.next_back(), None);
}
+
#[test]
fn test_windows_last() {
    // The final window covers the last `size` elements of the slice.
    let v: &[i32] = &[0, 1, 2, 3, 4, 5];
    assert_eq!(v.windows(2).last().unwrap()[1], 5);

    let v2: &[i32] = &[0, 1, 2, 3, 4];
    assert_eq!(v2.windows(2).last().unwrap()[0], 3);
}
+
#[test]
fn test_windows_zip() {
    let v1: &[i32] = &[0, 1, 2, 3, 4];
    let v2: &[i32] = &[6, 7, 8, 9, 10];

    // Sum each pair of parallel windows: (0+1)+(6+7), (1+2)+(7+8), ...
    let res: Vec<i32> =
        v1.windows(2).zip(v2.windows(2)).map(|(a, b)| a.iter().chain(b).sum()).collect();

    assert_eq!(res, [14, 18, 22, 26]);
}
+
#[test]
#[allow(const_err)]
fn test_iter_ref_consistency() {
    use std::fmt::Debug;

    // Iterators over a slice must hand out references with exactly the same
    // addresses as direct indexing and slice patterns do, including for ZSTs.

    /// Checks `iter()` address consistency for `nth`, `next` and `next_back`.
    fn test<T: Copy + Debug + PartialEq>(x: T) {
        let v: &[T] = &[x, x, x];
        let v_ptrs: [*const T; 3] = match v {
            [ref v1, ref v2, ref v3] => [v1 as *const _, v2 as *const _, v3 as *const _],
            _ => unreachable!(),
        };
        let len = v.len();

        // nth(i)
        for i in 0..len {
            assert_eq!(&v[i] as *const _, v_ptrs[i]); // check the v_ptrs array, just to be sure
            let nth = v.iter().nth(i).unwrap();
            assert_eq!(nth as *const _, v_ptrs[i]);
        }
        assert_eq!(v.iter().nth(len), None, "nth(len) should return None");

        // stepping through with nth(0)
        {
            let mut it = v.iter();
            for i in 0..len {
                let next = it.nth(0).unwrap();
                assert_eq!(next as *const _, v_ptrs[i]);
            }
            assert_eq!(it.nth(0), None);
        }

        // next()
        {
            let mut it = v.iter();
            for i in 0..len {
                let remaining = len - i;
                assert_eq!(it.size_hint(), (remaining, Some(remaining)));

                let next = it.next().unwrap();
                assert_eq!(next as *const _, v_ptrs[i]);
            }
            assert_eq!(it.size_hint(), (0, Some(0)));
            assert_eq!(it.next(), None, "The final call to next() should return None");
        }

        // next_back()
        {
            let mut it = v.iter();
            for i in 0..len {
                let remaining = len - i;
                assert_eq!(it.size_hint(), (remaining, Some(remaining)));

                let prev = it.next_back().unwrap();
                assert_eq!(prev as *const _, v_ptrs[remaining - 1]);
            }
            assert_eq!(it.size_hint(), (0, Some(0)));
            assert_eq!(it.next_back(), None, "The final call to next_back() should return None");
        }
    }

    /// Same checks for `iter_mut()`.
    fn test_mut<T: Copy + Debug + PartialEq>(x: T) {
        let v: &mut [T] = &mut [x, x, x];
        let v_ptrs: [*mut T; 3] = match v {
            [ref v1, ref v2, ref v3] => {
                [v1 as *const _ as *mut _, v2 as *const _ as *mut _, v3 as *const _ as *mut _]
            }
            _ => unreachable!(),
        };
        let len = v.len();

        // nth(i)
        for i in 0..len {
            assert_eq!(&mut v[i] as *mut _, v_ptrs[i]); // check the v_ptrs array, just to be sure
            let nth = v.iter_mut().nth(i).unwrap();
            assert_eq!(nth as *mut _, v_ptrs[i]);
        }
        // Fixed: this function validates the *mutable* iterator, but previously
        // checked `v.iter()` here (copy-paste from `test` above).
        assert_eq!(v.iter_mut().nth(len), None, "nth(len) should return None");

        // stepping through with nth(0)
        {
            // Fixed: use `iter_mut` (was `iter`) so the mutable iterator's
            // nth(0) stepping path is the one actually exercised.
            let mut it = v.iter_mut();
            for i in 0..len {
                let next = it.nth(0).unwrap();
                assert_eq!(next as *mut _, v_ptrs[i]);
            }
            assert_eq!(it.nth(0), None);
        }

        // next()
        {
            let mut it = v.iter_mut();
            for i in 0..len {
                let remaining = len - i;
                assert_eq!(it.size_hint(), (remaining, Some(remaining)));

                let next = it.next().unwrap();
                assert_eq!(next as *mut _, v_ptrs[i]);
            }
            assert_eq!(it.size_hint(), (0, Some(0)));
            assert_eq!(it.next(), None, "The final call to next() should return None");
        }

        // next_back()
        {
            let mut it = v.iter_mut();
            for i in 0..len {
                let remaining = len - i;
                assert_eq!(it.size_hint(), (remaining, Some(remaining)));

                let prev = it.next_back().unwrap();
                assert_eq!(prev as *mut _, v_ptrs[remaining - 1]);
            }
            assert_eq!(it.size_hint(), (0, Some(0)));
            assert_eq!(it.next_back(), None, "The final call to next_back() should return None");
        }
    }

    // Make sure iterators and slice patterns yield consistent addresses for various types,
    // including ZSTs.
    test(0u32);
    test(());
    test([0u32; 0]); // ZST with alignment > 0
    test_mut(0u32);
    test_mut(());
    test_mut([0u32; 0]); // ZST with alignment > 0
}
+
+// The current implementation of SliceIndex fails to handle methods
+// orthogonally from range types; therefore, it is worth testing
+// all of the indexing operations on each input.
mod slice_index {
    // This checks all six indexing methods, given an input range that
    // should succeed. (it is NOT suitable for testing invalid inputs)
    //
    // The six methods are: Index, get, get_unchecked (shared) and
    // IndexMut, get_mut, get_unchecked_mut (mutable).
    macro_rules! assert_range_eq {
        ($arr:expr, $range:expr, $expected:expr) => {
            let mut arr = $arr;
            let mut expected = $expected;
            {
                let s: &[_] = &arr;
                let expected: &[_] = &expected;

                assert_eq!(&s[$range], expected, "(in assertion for: index)");
                assert_eq!(s.get($range), Some(expected), "(in assertion for: get)");
                unsafe {
                    // SAFETY: the caller promises `$range` is in bounds for `$arr`.
                    assert_eq!(
                        s.get_unchecked($range),
                        expected,
                        "(in assertion for: get_unchecked)",
                    );
                }
            }
            {
                let s: &mut [_] = &mut arr;
                let expected: &mut [_] = &mut expected;

                assert_eq!(&mut s[$range], expected, "(in assertion for: index_mut)",);
                assert_eq!(
                    s.get_mut($range),
                    Some(&mut expected[..]),
                    "(in assertion for: get_mut)",
                );
                unsafe {
                    // SAFETY: same as above — `$range` is a valid range for `$arr`.
                    assert_eq!(
                        s.get_unchecked_mut($range),
                        expected,
                        "(in assertion for: get_unchecked_mut)",
                    );
                }
            }
        };
    }

    // Make sure the macro can actually detect bugs,
    // because if it can't, then what are we even doing here?
    //
    // (Be aware this only demonstrates the ability to detect bugs
    // in the FIRST method that panics, as the macro is not designed
    // to be used in `should_panic`)
    #[test]
    #[should_panic(expected = "out of range")]
    fn assert_range_eq_can_fail_by_panic() {
        assert_range_eq!([0, 1, 2], 0..5, [0, 1, 2]);
    }

    // (Be aware this only demonstrates the ability to detect bugs
    // in the FIRST method it calls, as the macro is not designed
    // to be used in `should_panic`)
    #[test]
    #[should_panic(expected = "==")]
    fn assert_range_eq_can_fail_by_inequality() {
        assert_range_eq!([0, 1, 2], 0..2, [0, 1, 2]);
    }

    // Test cases for bad index operations.
    //
    // This generates `should_panic` test cases for Index/IndexMut
    // and `None` test cases for get/get_mut.
    macro_rules! panic_cases {
        ($(
            // each test case needs a unique name to namespace the tests
            in mod $case_name:ident {
                data: $data:expr;

                // optional:
                //
                // one or more similar inputs for which data[input] succeeds,
                // and the corresponding output as an array. This helps validate
                // "critical points" where an input range straddles the boundary
                // between valid and invalid.
                // (such as the input `len..len`, which is just barely valid)
                $(
                    good: data[$good:expr] == $output:expr;
                )*

                bad: data[$bad:expr];
                message: $expect_msg:expr;
            }
        )*) => {$(
            mod $case_name {
                // The non-panicking methods must return None for the bad input,
                // and succeed for every good input.
                #[test]
                fn pass() {
                    let mut v = $data;

                    $( assert_range_eq!($data, $good, $output); )*

                    {
                        let v: &[_] = &v;
                        assert_eq!(v.get($bad), None, "(in None assertion for get)");
                    }

                    {
                        let v: &mut [_] = &mut v;
                        assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)");
                    }
                }

                // Index must panic with the expected message for the bad input.
                #[test]
                #[should_panic(expected = $expect_msg)]
                fn index_fail() {
                    let v = $data;
                    let v: &[_] = &v;
                    let _v = &v[$bad];
                }

                // ...and so must IndexMut.
                #[test]
                #[should_panic(expected = $expect_msg)]
                fn index_mut_fail() {
                    let mut v = $data;
                    let v: &mut [_] = &mut v;
                    let _v = &mut v[$bad];
                }
            }
        )*};
    }

    // Sanity checks for all supported range kinds on in-bounds input.
    #[test]
    fn simple() {
        let v = [0, 1, 2, 3, 4, 5];

        assert_range_eq!(v, .., [0, 1, 2, 3, 4, 5]);
        assert_range_eq!(v, ..2, [0, 1]);
        assert_range_eq!(v, ..=1, [0, 1]);
        assert_range_eq!(v, 2.., [2, 3, 4, 5]);
        assert_range_eq!(v, 1..4, [1, 2, 3]);
        assert_range_eq!(v, 1..=3, [1, 2, 3]);
    }

    // Ranges that straddle `len`: an endpoint of exactly `len` is the last
    // valid value; one past it must fail.
    panic_cases! {
        in mod rangefrom_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[6..] == [];
            bad: data[7..];
            message: "out of range";
        }

        in mod rangeto_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[..6] == [0, 1, 2, 3, 4, 5];
            bad: data[..7];
            message: "out of range";
        }

        in mod rangetoinclusive_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[..=5] == [0, 1, 2, 3, 4, 5];
            bad: data[..=6];
            message: "out of range";
        }

        in mod rangeinclusive_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[0..=5] == [0, 1, 2, 3, 4, 5];
            bad: data[0..=6];
            message: "out of range";
        }

        in mod range_len_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[6..6] == [];
            bad: data[7..7];
            message: "out of range";
        }

        in mod rangeinclusive_len_len {
            data: [0, 1, 2, 3, 4, 5];

            good: data[6..=5] == [];
            bad: data[7..=6];
            message: "out of range";
        }
    }

    panic_cases! {
        in mod rangeinclusive_exhausted {
            data: [0, 1, 2, 3, 4, 5];

            good: data[0..=5] == [0, 1, 2, 3, 4, 5];
            good: data[{
                let mut iter = 0..=5;
                iter.by_ref().count(); // exhaust it
                iter
            }] == [];

            // 0..=6 is out of range before exhaustion, so it
            // stands to reason that it still would be after.
            bad: data[{
                let mut iter = 0..=6;
                iter.by_ref().count(); // exhaust it
                iter
            }];
            message: "out of range";
        }
    }

    // Backwards ranges (start > end) are invalid even when both endpoints
    // are individually in bounds.
    panic_cases! {
        in mod range_neg_width {
            data: [0, 1, 2, 3, 4, 5];

            good: data[4..4] == [];
            bad: data[4..3];
            message: "but ends at";
        }

        in mod rangeinclusive_neg_width {
            data: [0, 1, 2, 3, 4, 5];

            good: data[4..=3] == [];
            bad: data[4..=2];
            message: "but ends at";
        }
    }

    // Inclusive ranges ending at usize::MAX would overflow when converted
    // to an exclusive end, and must be rejected rather than wrap.
    panic_cases! {
        in mod rangeinclusive_overflow {
            data: [0, 1];

            // note: using 0 specifically ensures that the result of overflowing is 0..0,
            // so that `get` doesn't simply return None for the wrong reason.
            bad: data[0 ..= usize::MAX];
            message: "maximum usize";
        }

        in mod rangetoinclusive_overflow {
            data: [0, 1];

            bad: data[..= usize::MAX];
            message: "maximum usize";
        }
    } // panic_cases!
}
+
#[test]
fn test_find_rfind() {
    let v = [0, 1, 2, 3, 4, 5];

    // `rfind(|_| true)` pops exactly one element from the back each call,
    // so this walks the slice in reverse.
    let mut iter = v.iter();
    let mut remaining = v.len();
    while let Some(&found) = iter.rfind(|_| true) {
        remaining -= 1;
        assert_eq!(found, v[remaining]);
    }
    assert_eq!(remaining, 0);

    // `rfind` returns the *last* matching element, not the first.
    assert_eq!(v.iter().rfind(|&&x| x <= 3), Some(&3));
}
+
#[test]
fn test_iter_folds() {
    // len > 4 so the unrolled fold path is exercised
    let a = [1, 2, 3, 4, 5];
    assert_eq!(a.iter().fold(0, |acc, &x| 2 * acc + x), 57);
    assert_eq!(a.iter().rfold(0, |acc, &x| 2 * acc + x), 129);
    let step = |acc: i32, &x: &i32| acc.checked_mul(2)?.checked_add(x);
    assert_eq!(a.iter().try_fold(0, &step), Some(57));
    assert_eq!(a.iter().try_rfold(0, &step), Some(129));

    // position/rfind are built on try_fold/try_rfold and must short-circuit,
    // leaving the iterator positioned just past the match.
    let b = [0, 1, 2, 3, 5, 5, 5, 7, 8, 9];
    let mut iter = b.iter();
    assert_eq!(iter.position(|&x| x == 3), Some(3));
    assert_eq!(iter.rfind(|&&x| x == 5), Some(&5));
    assert_eq!(iter.len(), 2);
}
+
#[test]
fn test_rotate_left() {
    const N: usize = 600;
    let a: &mut [_] = &mut [0; N];
    for (i, x) in a.iter_mut().enumerate() {
        *x = i;
    }

    a.rotate_left(42);

    // Element `i` moved 42 positions toward the front (mod N), so it is now
    // found at index (i + N - 42) % N.
    let k = N - 42;
    for i in 0..N {
        assert_eq!(a[(i + k) % N], i);
    }
}
+
#[test]
fn test_rotate_right() {
    const N: usize = 600;
    let a: &mut [_] = &mut [0; N];
    for (i, x) in a.iter_mut().enumerate() {
        *x = i;
    }

    a.rotate_right(42);

    // Element `i` moved 42 positions toward the back (mod N).
    for i in 0..N {
        assert_eq!(a[(i + 42) % N], i);
    }
}
+
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn brute_force_rotate_test_0() {
    // Exhaustively check every (length, shift) pair up to `n`, to hit the
    // edge cases where the rotation implementation switches algorithms.
    let n = 300;
    for len in 0..n {
        for s in 0..len {
            let mut v: Vec<usize> = (0..len).collect();
            v[..].rotate_right(s);
            for (i, &x) in v.iter().enumerate() {
                assert_eq!(x, len.wrapping_add(i.wrapping_sub(s)) % len);
            }
        }
    }
}
+
#[test]
fn brute_force_rotate_test_1() {
    // `ptr_rotate` covers so many kinds of pointer usage, that this is just a good test for
    // pointers in general. This uses a `[usize; 4]` to hit all algorithms without overwhelming miri
    let n = 30;
    for len in 0..n {
        for s in 0..len {
            let mut v: Vec<[usize; 4]> = (0..len).map(|i| [i, 0, 0, 0]).collect();
            v[..].rotate_right(s);
            for (i, x) in v.iter().enumerate() {
                assert_eq!(x[0], len.wrapping_add(i.wrapping_sub(s)) % len);
            }
        }
    }
}
+
#[test]
#[cfg(not(target_arch = "wasm32"))]
fn sort_unstable() {
    use core::cmp::Ordering::{Equal, Greater, Less};
    use core::slice::heapsort;
    use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};

    // Randomized check that `sort_unstable`, `sort_unstable_by`, and the
    // internal `heapsort` produce correctly ordered output across many
    // lengths and value ranges (`modulus` controls duplicate density).

    // Miri is too slow (but still need to `chain` to make the types match)
    let lens = if cfg!(miri) { (2..20).chain(0..0) } else { (2..25).chain(500..510) };
    let rounds = if cfg!(miri) { 1 } else { 100 };

    let mut v = [0; 600];
    let mut tmp = [0; 600];
    let mut rng = StdRng::from_entropy();

    for len in lens {
        let v = &mut v[0..len];
        let tmp = &mut tmp[0..len];

        for &modulus in &[5, 10, 100, 1000] {
            for _ in 0..rounds {
                for i in 0..len {
                    v[i] = rng.gen::<i32>() % modulus;
                }

                // Sort in default order.
                tmp.copy_from_slice(v);
                tmp.sort_unstable();
                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));

                // Sort in ascending order.
                tmp.copy_from_slice(v);
                tmp.sort_unstable_by(|a, b| a.cmp(b));
                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));

                // Sort in descending order.
                tmp.copy_from_slice(v);
                tmp.sort_unstable_by(|a, b| b.cmp(a));
                assert!(tmp.windows(2).all(|w| w[0] >= w[1]));

                // Test heapsort using `<` operator.
                tmp.copy_from_slice(v);
                heapsort(tmp, |a, b| a < b);
                assert!(tmp.windows(2).all(|w| w[0] <= w[1]));

                // Test heapsort using `>` operator.
                tmp.copy_from_slice(v);
                heapsort(tmp, |a, b| a > b);
                assert!(tmp.windows(2).all(|w| w[0] >= w[1]));
            }
        }
    }

    // Sort using a completely random comparison function.
    // This will reorder the elements *somehow*, but won't panic.
    for i in 0..v.len() {
        v[i] = i as i32;
    }
    v.sort_unstable_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
    // A subsequent well-behaved sort must restore the original order.
    v.sort_unstable();
    for i in 0..v.len() {
        assert_eq!(v[i], i as i32);
    }

    // Should not panic.
    [0i32; 0].sort_unstable();
    [(); 10].sort_unstable();
    [(); 100].sort_unstable();

    // A single-element slice must come through unchanged.
    let mut v = [0xDEADBEEFu64];
    v.sort_unstable();
    assert!(v == [0xDEADBEEF]);
}
+
#[test]
#[cfg(not(target_arch = "wasm32"))]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn select_nth_unstable() {
    use core::cmp::Ordering::{Equal, Greater, Less};
    use rand::rngs::StdRng;
    use rand::seq::SliceRandom;
    use rand::{Rng, SeedableRng};

    // Randomized check of the selection invariant: after
    // `select_nth_unstable(pivot)`, the element at `pivot` equals the one a
    // full sort would place there, everything before it compares <=, and
    // everything after compares >=.

    let mut rng = StdRng::from_entropy();

    for len in (2..21).chain(500..501) {
        let mut orig = vec![0; len];

        for &modulus in &[5, 10, 1000] {
            for _ in 0..10 {
                for i in 0..len {
                    orig[i] = rng.gen::<i32>() % modulus;
                }

                let v_sorted = {
                    let mut v = orig.clone();
                    v.sort();
                    v
                };

                // Sort in default order.
                for pivot in 0..len {
                    let mut v = orig.clone();
                    v.select_nth_unstable(pivot);

                    assert_eq!(v_sorted[pivot], v[pivot]);
                    for i in 0..pivot {
                        for j in pivot..len {
                            assert!(v[i] <= v[j]);
                        }
                    }
                }

                // Sort in ascending order.
                for pivot in 0..len {
                    let mut v = orig.clone();
                    let (left, pivot, right) = v.select_nth_unstable_by(pivot, |a, b| a.cmp(b));

                    // The returned halves exclude the pivot element itself.
                    assert_eq!(left.len() + right.len(), len - 1);

                    for l in left {
                        assert!(l <= pivot);
                        for r in right.iter_mut() {
                            assert!(l <= r);
                            assert!(pivot <= r);
                        }
                    }
                }

                // Sort in descending order.
                let sort_descending_comparator = |a: &i32, b: &i32| b.cmp(a);
                let v_sorted_descending = {
                    let mut v = orig.clone();
                    v.sort_by(sort_descending_comparator);
                    v
                };

                for pivot in 0..len {
                    let mut v = orig.clone();
                    v.select_nth_unstable_by(pivot, sort_descending_comparator);

                    assert_eq!(v_sorted_descending[pivot], v[pivot]);
                    for i in 0..pivot {
                        for j in pivot..len {
                            assert!(v[j] <= v[i]);
                        }
                    }
                }
            }
        }
    }

    // Sort at index using a completely random comparison function.
    // This will reorder the elements *somehow*, but won't panic.
    let mut v = [0; 500];
    for i in 0..v.len() {
        v[i] = i as i32;
    }

    for pivot in 0..v.len() {
        v.select_nth_unstable_by(pivot, |_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
        // A subsequent well-behaved sort must restore the original order.
        v.sort();
        for i in 0..v.len() {
            assert_eq!(v[i], i as i32);
        }
    }

    // Should not panic.
    [(); 10].select_nth_unstable(0);
    [(); 10].select_nth_unstable(5);
    [(); 10].select_nth_unstable(9);
    [(); 100].select_nth_unstable(0);
    [(); 100].select_nth_unstable(50);
    [(); 100].select_nth_unstable(99);

    // A single-element slice must come through unchanged.
    let mut v = [0xDEADBEEFu64];
    v.select_nth_unstable(0);
    assert!(v == [0xDEADBEEF]);
}
+
#[test]
#[should_panic(expected = "index 0 greater than length of slice")]
fn select_nth_unstable_zero_length() {
    // On an empty slice every index, including 0, is past the end.
    let mut nothing: [i32; 0] = [];
    nothing.select_nth_unstable(0);
}
+
#[test]
#[should_panic(expected = "index 20 greater than length of slice")]
fn select_nth_unstable_past_length() {
    // Index 20 is well past the length-10 slice, so the call must panic.
    let mut ten = [0i32; 10];
    ten.select_nth_unstable(20);
}
+
pub mod memchr {
    use core::slice::memchr::{memchr, memrchr};

    // test fallback implementations on all platforms
    // (memchr returns the index of the first match, memrchr of the last)
    #[test]
    fn matches_one() {
        assert_eq!(Some(0), memchr(b'a', b"a"));
    }

    #[test]
    fn matches_begin() {
        assert_eq!(Some(0), memchr(b'a', b"aaaa"));
    }

    #[test]
    fn matches_end() {
        assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
    }

    #[test]
    fn matches_nul() {
        assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
    }

    // The search must not stop at an embedded NUL byte.
    #[test]
    fn matches_past_nul() {
        assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
    }

    #[test]
    fn no_match_empty() {
        assert_eq!(None, memchr(b'a', b""));
    }

    #[test]
    fn no_match() {
        assert_eq!(None, memchr(b'a', b"xyz"));
    }

    #[test]
    fn matches_one_reversed() {
        assert_eq!(Some(0), memrchr(b'a', b"a"));
    }

    // memrchr reports the *last* occurrence.
    #[test]
    fn matches_begin_reversed() {
        assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
    }

    #[test]
    fn matches_end_reversed() {
        assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
    }

    #[test]
    fn matches_nul_reversed() {
        assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
    }

    #[test]
    fn matches_past_nul_reversed() {
        assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
    }

    #[test]
    fn no_match_empty_reversed() {
        assert_eq!(None, memrchr(b'a', b""));
    }

    #[test]
    fn no_match_reversed() {
        assert_eq!(None, memrchr(b'a', b"xyz"));
    }

    // Slide the start of the search window over 16 offsets so the scan
    // begins at every possible alignment of the underlying buffer.
    #[test]
    fn each_alignment_reversed() {
        let mut data = [1u8; 64];
        let needle = 2;
        let pos = 40;
        data[pos] = needle;
        for start in 0..16 {
            assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
        }
    }
}
+
#[test]
fn test_align_to_simple() {
    let bytes = [1u8, 2, 3, 4, 5, 6, 7];
    // Which bytes land in the aligned middle depends on the runtime address
    // of `bytes`, so the assertions below must accept every legal placement.
    let (prefix, aligned, suffix) = unsafe { bytes.align_to::<u16>() };
    assert_eq!(aligned.len(), 3);
    // Either the first byte is the unaligned prefix or the last is the suffix.
    assert!(prefix == [1] || suffix == [7]);
    // Four possible outcomes: middle starting at byte 0 or byte 1, each read
    // with big- or little-endian byte order.
    let expect1 = [1 << 8 | 2, 3 << 8 | 4, 5 << 8 | 6];
    let expect2 = [1 | 2 << 8, 3 | 4 << 8, 5 | 6 << 8];
    let expect3 = [2 << 8 | 3, 4 << 8 | 5, 6 << 8 | 7];
    let expect4 = [2 | 3 << 8, 4 | 5 << 8, 6 | 7 << 8];
    assert!(
        aligned == expect1 || aligned == expect2 || aligned == expect3 || aligned == expect4,
        "aligned={:?} expected={:?} || {:?} || {:?} || {:?}",
        aligned,
        expect1,
        expect2,
        expect3,
        expect4
    );
}
+
#[test]
fn test_align_to_zst() {
    let bytes = [1, 2, 3, 4, 5, 6, 7];
    // Aligning to a zero-sized type: the middle must stay empty, and every
    // element ends up entirely in the prefix or entirely in the suffix.
    let (prefix, aligned, suffix) = unsafe { bytes.align_to::<()>() };
    assert_eq!(aligned.len(), 0);
    assert!(prefix == [1, 2, 3, 4, 5, 6, 7] || suffix == [1, 2, 3, 4, 5, 6, 7]);
}
+
#[test]
fn test_align_to_non_trivial() {
    // Source and target element sizes differ (presumably 16 vs 24 bytes with
    // padding — the asserts below pin 8*16 = 4*24 + 2*16) but share alignment,
    // so no bytes can be lost to alignment itself.
    #[repr(align(8))]
    struct U64(u64, u64);
    #[repr(align(8))]
    struct U64U64U32(u64, u64, u32);
    let data = [
        U64(1, 2),
        U64(3, 4),
        U64(5, 6),
        U64(7, 8),
        U64(9, 10),
        U64(11, 12),
        U64(13, 14),
        U64(15, 16),
    ];
    let (prefix, aligned, suffix) = unsafe { data.align_to::<U64U64U32>() };
    assert_eq!(aligned.len(), 4);
    // The two leftover source elements split between prefix and suffix.
    assert_eq!(prefix.len() + suffix.len(), 2);
}
+
#[test]
fn test_align_to_empty_mid() {
    use core::mem;

    // Make sure that we do not create empty unaligned slices for the mid part, even when the
    // overall slice is too short to contain an aligned address.
    let bytes = [1, 2, 3, 4, 5, 6, 7];
    type Chunk = u32;
    for offset in 0..4 {
        // One-byte windows at each offset: mid is necessarily empty, but its
        // pointer must still be aligned for Chunk.
        let (_, mid, _) = unsafe { bytes[offset..offset + 1].align_to::<Chunk>() };
        assert_eq!(mid.as_ptr() as usize % mem::align_of::<Chunk>(), 0);
    }
}
+
#[test]
fn test_align_to_mut_aliasing() {
    let mut val = [1u8, 2, 3, 4, 5];
    // `align_to_mut` used to create `mid` in a way that there was some intermediate
    // incorrect aliasing, invalidating the resulting `mid` slice.
    // (Regression test: writing through `mid` must be legal.)
    let (begin, mid, end) = unsafe { val.align_to_mut::<[u8; 2]>() };
    assert!(begin.len() == 0);
    assert!(end.len() == 1);
    // Copy the second pair over the first through the aligned view.
    mid[0] = mid[1];
    assert_eq!(val, [3, 4, 3, 4, 5])
}
+
#[test]
fn test_slice_partition_dedup_by() {
    let mut slice: [i32; 9] = [1, -1, 2, 3, 1, -5, 5, -2, 2];

    // Consecutive elements equal under the predicate (same absolute value)
    // are moved into the `duplicates` half; the kept element of each run is
    // the first one.
    let (dedup, duplicates) = slice.partition_dedup_by(|a, b| a.abs() == b.abs());

    assert_eq!(dedup, [1, 2, 3, 1, -5, -2]);
    // NOTE: the exact order of the duplicates half is what the current
    // implementation produces; this test pins it down.
    assert_eq!(duplicates, [5, -1, 2]);
}
+
#[test]
fn test_slice_partition_dedup_empty() {
    // Degenerate case: an empty slice yields two empty halves.
    let mut slice: [i32; 0] = [];

    let (dedup, duplicates) = slice.partition_dedup();

    assert_eq!(dedup, []);
    assert_eq!(duplicates, []);
}
+
#[test]
fn test_slice_partition_dedup_one() {
    // A single element has no neighbour to be a duplicate of.
    let mut slice = [12];

    let (dedup, duplicates) = slice.partition_dedup();

    assert_eq!(dedup, [12]);
    assert_eq!(duplicates, []);
}
+
#[test]
fn test_slice_partition_dedup_multiple_ident() {
    // Long runs of identical values collapse to one element each; all the
    // repeats end up in the duplicates half.
    let mut slice = [12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11];

    let (dedup, duplicates) = slice.partition_dedup();

    assert_eq!(dedup, [12, 11]);
    assert_eq!(duplicates, [12, 12, 12, 12, 11, 11, 11, 11, 11]);
}
+
#[test]
fn test_slice_partition_dedup_partialeq() {
    #[derive(Debug)]
    struct Foo(i32, i32);

    // Equality looks only at the first field, so Foo(0, 1) == Foo(0, 5).
    impl PartialEq for Foo {
        fn eq(&self, other: &Foo) -> bool {
            self.0 == other.0
        }
    }

    let mut slice = [Foo(0, 1), Foo(0, 5), Foo(1, 7), Foo(1, 9)];

    let (dedup, duplicates) = slice.partition_dedup();

    // The *first* element of each equal run is the one kept.
    assert_eq!(dedup, [Foo(0, 1), Foo(1, 7)]);
    assert_eq!(duplicates, [Foo(0, 5), Foo(1, 9)]);
}
+
#[test]
fn test_copy_within() {
    // Start to end, with a RangeTo.
    let mut buf = *b"Hello, World!";
    buf.copy_within(..3, 10);
    assert_eq!(&buf, b"Hello, WorHel");

    // End to start, with a RangeFrom.
    let mut buf = *b"Hello, World!";
    buf.copy_within(10.., 0);
    assert_eq!(&buf, b"ld!lo, World!");

    // Overlapping source and destination, with a RangeInclusive.
    let mut buf = *b"Hello, World!";
    buf.copy_within(0..=11, 1);
    assert_eq!(&buf, b"HHello, World");

    // The whole slice copied onto itself, with a RangeFull.
    let mut buf = *b"Hello, World!";
    buf.copy_within(.., 0);
    assert_eq!(&buf, b"Hello, World!");

    // Empty ranges touching the very end of the slice must be no-ops, not UB.
    let mut buf = *b"Hello, World!";
    buf.copy_within(13..13, 5);
    assert_eq!(&buf, b"Hello, World!");
    buf.copy_within(5..5, 13);
    assert_eq!(&buf, b"Hello, World!");
}
+
#[test]
#[should_panic(expected = "range end index 14 out of range for slice of length 13")]
fn test_copy_within_panics_src_too_long() {
    let mut buf = *b"Hello, World!";
    // The slice is only 13 bytes long, so a source end of 14 must panic.
    buf.copy_within(10..14, 0);
}
+
#[test]
#[should_panic(expected = "dest is out of bounds")]
fn test_copy_within_panics_dest_too_long() {
    let mut buf = *b"Hello, World!";
    // Copying 4 bytes to start at index 10 would write past the 13-byte end.
    buf.copy_within(0..4, 10);
}
#[test]
#[should_panic(expected = "slice index starts at 2 but ends at 1")]
fn test_copy_within_panics_src_inverted() {
    let mut buf = *b"Hello, World!";
    // A range whose start exceeds its end is invalid.
    buf.copy_within(2..1, 0);
}
#[test]
#[should_panic(expected = "attempted to index slice up to maximum usize")]
fn test_copy_within_panics_src_out_of_bounds() {
    let mut buf = *b"Hello, World!";
    // An inclusive upper bound of usize::MAX would overflow when converted
    // to an exclusive source end, so it must panic rather than wrap.
    buf.copy_within(usize::MAX..=usize::MAX, 0);
}
+
#[test]
fn test_is_sorted() {
    let empty: [i32; 0] = [];

    assert!([1, 2, 2, 9].is_sorted()); // equal neighbours are allowed
    assert!(![1, 3, 2].is_sorted());
    assert!([0].is_sorted()); // single element is trivially sorted
    assert!(empty.is_sorted()); // as is the empty slice
    // NaN compares unordered with everything, so this run is not sorted.
    assert!(![0.0, 1.0, f32::NAN].is_sorted());
    assert!([-2, -1, 0, 3].is_sorted());
    // ...but by absolute value the same run is out of order.
    assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs()));
    // &str orders lexicographically, not by length.
    assert!(!["c", "bb", "aaa"].is_sorted());
    assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
}
+
#[test]
fn test_slice_run_destructors() {
    // Destructors of the elements of a slice literal must run when the
    // slice goes out of scope.
    struct Bump<'a> {
        counter: &'a Cell<isize>,
    }

    impl Drop for Bump<'_> {
        fn drop(&mut self) {
            self.counter.set(self.counter.get() + 1);
        }
    }

    let counter = &Cell::new(0);

    {
        let slice = &[Bump { counter }];
        // Nothing dropped yet while the slice is alive.
        assert_eq!(slice[0].counter.get(), 0);
    }

    // The single element was dropped exactly once.
    assert_eq!(counter.get(), 1);
}
--- /dev/null
+// All `str` tests live in liballoc/tests
--- /dev/null
+use core::str::lossy::*;
+
#[test]
fn chunks() {
    // Pure ASCII: a single fully-valid chunk with no broken bytes.
    let mut iter = Utf8Lossy::from_bytes(b"hello").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "hello", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // Valid multi-byte UTF-8 also comes through as one valid chunk.
    let mut iter = Utf8Lossy::from_bytes("ศไทย中华Việt Nam".as_bytes()).chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "ศไทย中华Việt Nam", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // Each invalid byte ends a chunk; the valid prefix comes with it.
    let mut iter = Utf8Lossy::from_bytes(b"Hello\xC2 There\xFF Goodbye").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC2" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xFF" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // Overlong/invalid sequences are rejected byte by byte, so \xC0\x80
    // produces two broken chunks, not one.
    let mut iter = Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC0" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xE6\x83" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // \xF5 can never begin a valid sequence (code points stop at U+10FFFF).
    let mut iter = Utf8Lossy::from_bytes(b"\xF5foo\xF5\x80bar").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF5" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF5" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // Truncated 4-byte sequences: the consumed prefix bytes form `broken`.
    let mut iter = Utf8Lossy::from_bytes(b"\xF1foo\xF1\x80bar\xF1\x80\x80baz").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF1" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF1\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF1\x80\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // After \xF4 only \x80-\x8F may follow (max U+10FFFF), so \xBF is
    // rejected as a separate broken chunk.
    let mut iter = Utf8Lossy::from_bytes(b"\xF4foo\xF4\x80bar\xF4\xBFbaz").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF4" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF4\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF4" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // \xF0\x80... is an overlong encoding (each byte broken individually);
    // \xF0\x90\x80\x80 is the first valid 4-byte sequence (U+10000).
    let mut iter = Utf8Lossy::from_bytes(b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF0" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "foo\u{10000}bar", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());

    // surrogates
    // (U+D800..U+DFFF are not valid scalar values; every byte is rejected)
    let mut iter = Utf8Lossy::from_bytes(b"\xED\xA0\x80foo\xED\xBF\xBFbar").chunks();
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xED" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xA0" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xED" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF" }), iter.next());
    assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"" }), iter.next());
    assert_eq!(None, iter.next());
}
+
#[test]
fn display() {
    // Display replaces every broken chunk with U+FFFD REPLACEMENT CHARACTER.
    assert_eq!(
        "Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye",
        &Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()
    );
}
+
#[test]
fn debug() {
    // Debug escapes broken bytes as \xNN and valid-but-non-printable
    // characters as \u{...}, rather than substituting U+FFFD.
    assert_eq!(
        "\"Hello\\xc0\\x80 There\\xe6\\x83 Goodbye\\u{10d4ea}\"",
        &format!(
            "{:?}",
            Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye\xf4\x8d\x93\xaa")
        )
    );
}
--- /dev/null
+use core::task::Poll;
+
#[test]
fn poll_const() {
    // `Poll::is_ready` and `Poll::is_pending` must be usable when
    // initializing constants, i.e. in a const context.
    const POLL: Poll<usize> = Poll::Pending;

    const READY: bool = POLL.is_ready();
    const PENDING: bool = POLL.is_pending();

    assert!(!READY);
    assert!(PENDING);
}
--- /dev/null
+use core::time::Duration;
+
#[test]
fn creation() {
    // Distinct second counts compare unequal; equal totals compare equal.
    assert_ne!(Duration::from_secs(1), Duration::from_secs(0));
    assert_eq!(Duration::from_secs(1) + Duration::from_secs(2), Duration::from_secs(3));
    // Sub-second milliseconds land in the nanosecond field.
    assert_eq!(
        Duration::from_secs(4) + Duration::from_millis(10),
        Duration::new(4, 10 * 1_000_000)
    );
    // Whole seconds expressed in milliseconds normalize into the seconds field.
    assert_eq!(Duration::from_millis(4000), Duration::new(4, 0));
}
+
#[test]
#[should_panic]
fn new_overflow() {
    // 1e9 ns carries one extra second, which overflows a u64::MAX second
    // count — `Duration::new` must panic.
    let secs = u64::MAX;
    let nanos = 1_000_000_000;
    let _ = Duration::new(secs, nanos);
}
+
#[test]
fn secs() {
    // Whole-second component for a spread of construction paths.
    let cases: &[(Duration, u64)] = &[
        (Duration::new(0, 0), 0),
        (Duration::new(0, 500_000_005), 0),
        (Duration::new(0, 1_050_000_001), 1), // `new` normalizes the carry
        (Duration::from_secs(1), 1),
        (Duration::from_millis(999), 0),
        (Duration::from_millis(1001), 1),
        (Duration::from_micros(999_999), 0),
        (Duration::from_micros(1_000_001), 1),
        (Duration::from_nanos(999_999_999), 0),
        (Duration::from_nanos(1_000_000_001), 1),
    ];
    for &(dur, want) in cases {
        assert_eq!(dur.as_secs(), want);
    }
}
+
#[test]
fn millis() {
    // Sub-second milliseconds for a spread of construction paths.
    let cases: &[(Duration, u32)] = &[
        (Duration::new(0, 0), 0),
        (Duration::new(0, 500_000_005), 500),
        (Duration::new(0, 1_050_000_001), 50), // `new` normalizes the carry
        (Duration::from_secs(1), 0),
        (Duration::from_millis(999), 999),
        (Duration::from_millis(1001), 1),
        (Duration::from_micros(999_999), 999),
        (Duration::from_micros(1_001_000), 1),
        (Duration::from_nanos(999_999_999), 999),
        (Duration::from_nanos(1_001_000_000), 1),
    ];
    for &(dur, want) in cases {
        assert_eq!(dur.subsec_millis(), want);
    }
}
+
#[test]
fn micros() {
    // Sub-second microseconds for a spread of construction paths.
    let cases: &[(Duration, u32)] = &[
        (Duration::new(0, 0), 0),
        (Duration::new(0, 500_000_005), 500_000),
        (Duration::new(0, 1_050_000_001), 50_000), // `new` normalizes the carry
        (Duration::from_secs(1), 0),
        (Duration::from_millis(999), 999_000),
        (Duration::from_millis(1001), 1_000),
        (Duration::from_micros(999_999), 999_999),
        (Duration::from_micros(1_000_001), 1),
        (Duration::from_nanos(999_999_999), 999_999),
        (Duration::from_nanos(1_000_001_000), 1),
    ];
    for &(dur, want) in cases {
        assert_eq!(dur.subsec_micros(), want);
    }
}
+
#[test]
fn nanos() {
    // Sub-second nanoseconds for a spread of construction paths.
    let cases: &[(Duration, u32)] = &[
        (Duration::new(0, 0), 0),
        (Duration::new(0, 5), 5),
        (Duration::new(0, 1_000_000_001), 1), // `new` normalizes the carry
        (Duration::from_secs(1), 0),
        (Duration::from_millis(999), 999_000_000),
        (Duration::from_millis(1001), 1_000_000),
        (Duration::from_micros(999_999), 999_999_000),
        (Duration::from_micros(1_000_001), 1_000),
        (Duration::from_nanos(999_999_999), 999_999_999),
        (Duration::from_nanos(1_000_000_001), 1),
    ];
    for &(dur, want) in cases {
        assert_eq!(dur.subsec_nanos(), want);
    }
}
+
#[test]
fn add() {
    // Addition carries nanosecond overflow into the seconds field.
    let one_ns = Duration::new(0, 1);
    assert_eq!(Duration::new(0, 0) + one_ns, one_ns);
    let carried = Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001);
    assert_eq!(carried, Duration::new(1, 1));
}
+
#[test]
fn checked_add() {
    // In-range additions yield `Some`; overflowing the seconds yields `None`.
    assert_eq!(Some(Duration::new(0, 1)), Duration::new(0, 0).checked_add(Duration::new(0, 1)));
    assert_eq!(
        Some(Duration::new(1, 1)),
        Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001))
    );
    assert_eq!(None, Duration::new(1, 0).checked_add(Duration::new(u64::MAX, 0)));
}
+
#[test]
fn saturating_add() {
    // Out-of-range additions clamp to `Duration::MAX` instead of panicking.
    assert_eq!(Duration::new(0, 1), Duration::new(0, 0).saturating_add(Duration::new(0, 1)));
    assert_eq!(
        Duration::new(1, 1),
        Duration::new(0, 500_000_000).saturating_add(Duration::new(0, 500_000_001))
    );
    assert_eq!(Duration::MAX, Duration::new(1, 0).saturating_add(Duration::new(u64::MAX, 0)));
}
+
#[test]
fn sub() {
    // Subtraction borrows from the seconds field when needed.
    let cases = [
        (Duration::new(0, 1), Duration::new(0, 0), Duration::new(0, 1)),
        (Duration::new(0, 500_000_001), Duration::new(0, 500_000_000), Duration::new(0, 1)),
        (Duration::new(1, 0), Duration::new(0, 1), Duration::new(0, 999_999_999)),
    ];
    for &(lhs, rhs, want) in &cases {
        assert_eq!(lhs - rhs, want);
    }
}
+
#[test]
fn checked_sub() {
    // Underflow (subtrahend larger than minuend) must yield `None`.
    assert_eq!(Some(Duration::NANOSECOND), Duration::NANOSECOND.checked_sub(Duration::ZERO));
    assert_eq!(
        Some(Duration::new(0, 999_999_999)),
        Duration::SECOND.checked_sub(Duration::NANOSECOND)
    );
    assert_eq!(None, Duration::ZERO.checked_sub(Duration::NANOSECOND));
    assert_eq!(None, Duration::ZERO.checked_sub(Duration::SECOND));
}
+
#[test]
fn saturating_sub() {
    // Underflow clamps to `Duration::ZERO` instead of panicking.
    assert_eq!(Duration::NANOSECOND, Duration::NANOSECOND.saturating_sub(Duration::ZERO));
    assert_eq!(
        Duration::new(0, 999_999_999),
        Duration::SECOND.saturating_sub(Duration::NANOSECOND)
    );
    assert_eq!(Duration::ZERO, Duration::ZERO.saturating_sub(Duration::NANOSECOND));
    assert_eq!(Duration::ZERO, Duration::ZERO.saturating_sub(Duration::SECOND));
}
+
#[test]
#[should_panic]
fn sub_bad1() {
    // Borrowing a nanosecond from a zero duration must panic.
    let zero = Duration::new(0, 0);
    let _ = zero - Duration::new(0, 1);
}
+
#[test]
#[should_panic]
fn sub_bad2() {
    // Subtracting a whole second from a zero duration must panic.
    let zero = Duration::new(0, 0);
    let _ = zero - Duration::new(1, 0);
}
+
#[test]
fn mul() {
    // Scalar multiplication, including nanosecond carry into seconds.
    let cases = [
        (Duration::new(0, 1), 2, Duration::new(0, 2)),
        (Duration::new(1, 1), 3, Duration::new(3, 3)),
        (Duration::new(0, 500_000_001), 4, Duration::new(2, 4)),
        (Duration::new(0, 500_000_001), 4000, Duration::new(2000, 4000)),
    ];
    for &(dur, factor, want) in &cases {
        assert_eq!(dur * factor, want);
    }
}
+
#[test]
fn checked_mul() {
    // In-range products yield `Some`; seconds-field overflow yields `None`.
    assert_eq!(Some(Duration::new(0, 2)), Duration::new(0, 1).checked_mul(2));
    assert_eq!(Some(Duration::new(3, 3)), Duration::new(1, 1).checked_mul(3));
    assert_eq!(Some(Duration::new(2, 4)), Duration::new(0, 500_000_001).checked_mul(4));
    assert_eq!(Some(Duration::new(2000, 4000)), Duration::new(0, 500_000_001).checked_mul(4000));
    assert_eq!(None, Duration::new(u64::MAX - 1, 0).checked_mul(2));
}
+
#[test]
fn saturating_mul() {
    // Out-of-range products clamp to `Duration::MAX` instead of panicking.
    assert_eq!(Duration::new(0, 2), Duration::new(0, 1).saturating_mul(2));
    assert_eq!(Duration::new(3, 3), Duration::new(1, 1).saturating_mul(3));
    assert_eq!(Duration::new(2, 4), Duration::new(0, 500_000_001).saturating_mul(4));
    assert_eq!(Duration::new(2000, 4000), Duration::new(0, 500_000_001).saturating_mul(4000));
    assert_eq!(Duration::MAX, Duration::new(u64::MAX - 1, 0).saturating_mul(2));
}
+
#[test]
fn div() {
    // Scalar division truncates at nanosecond granularity.
    assert_eq!(Duration::new(0, 0), Duration::new(0, 1) / 2);
    assert_eq!(Duration::new(0, 333_333_333), Duration::new(1, 1) / 3);
    assert_eq!(Duration::new(0, 999_999_990), Duration::new(99, 999_999_000) / 100);
}
+
#[test]
fn checked_div() {
    // Division by zero yields `None`; everything else yields `Some`.
    assert_eq!(Some(Duration::new(1, 0)), Duration::new(2, 0).checked_div(2));
    assert_eq!(Some(Duration::new(0, 500_000_000)), Duration::new(1, 0).checked_div(2));
    assert_eq!(None, Duration::new(2, 0).checked_div(0));
}
+
#[test]
fn correct_sum() {
    // The `Sum` impl must carry nanosecond overflow across the items.
    let parts = [
        Duration::new(1, 999_999_999),
        Duration::new(2, 999_999_999),
        Duration::new(0, 999_999_999),
        Duration::new(0, 999_999_999),
        Duration::new(0, 999_999_999),
        Duration::new(5, 0),
    ];
    // 5 * 999_999_999 ns == 4 s + 999_999_995 ns.
    let total: Duration = parts.iter().copied().sum();
    assert_eq!(total, Duration::new(1 + 2 + 5 + 4, 1_000_000_000 - 5));
}
+
#[test]
fn debug_formatting_extreme_values() {
    // u64::MAX (18446744073709551615) seconds must print without overflow
    // or truncation.
    let extreme = Duration::new(u64::MAX, 123_456_789);
    assert_eq!(format!("{:?}", extreme), "18446744073709551615.123456789s");
}
+
#[test]
fn debug_formatting_secs() {
    // Seconds range: trailing zeros in the fraction are trimmed.
    let cases: &[(Duration, &str)] = &[
        (Duration::new(7, 0), "7s"),
        (Duration::new(7, 100_000_000), "7.1s"),
        (Duration::new(7, 10_000), "7.00001s"),
        (Duration::new(7, 1), "7.000000001s"),
        (Duration::new(7, 123_456_789), "7.123456789s"),
        (Duration::new(88, 0), "88s"),
        (Duration::new(88, 100_000_000), "88.1s"),
        (Duration::new(88, 10_000), "88.00001s"),
        (Duration::new(88, 1), "88.000000001s"),
        (Duration::new(88, 123_456_789), "88.123456789s"),
        (Duration::new(999, 0), "999s"),
        (Duration::new(999, 100_000_000), "999.1s"),
        (Duration::new(999, 10_000), "999.00001s"),
        (Duration::new(999, 1), "999.000000001s"),
        (Duration::new(999, 123_456_789), "999.123456789s"),
    ];
    for &(dur, want) in cases {
        assert_eq!(format!("{:?}", dur), want);
    }
}
+
#[test]
fn debug_formatting_millis() {
    // Millisecond range (< 1 s): unit is `ms`, trailing zeros trimmed.
    let cases: &[(u32, &str)] = &[
        (7_000_000, "7ms"),
        (7_100_000, "7.1ms"),
        (7_000_001, "7.000001ms"),
        (7_123_456, "7.123456ms"),
        (88_000_000, "88ms"),
        (88_100_000, "88.1ms"),
        (88_000_001, "88.000001ms"),
        (88_123_456, "88.123456ms"),
        (999_000_000, "999ms"),
        (999_100_000, "999.1ms"),
        (999_000_001, "999.000001ms"),
        (999_123_456, "999.123456ms"),
    ];
    for &(nanos, want) in cases {
        assert_eq!(format!("{:?}", Duration::new(0, nanos)), want);
    }
}
+
#[test]
fn debug_formatting_micros() {
    // Microsecond range (< 1 ms): unit is `µs`, trailing zeros trimmed.
    let cases: &[(u32, &str)] = &[
        (7_000, "7µs"),
        (7_100, "7.1µs"),
        (7_001, "7.001µs"),
        (7_123, "7.123µs"),
        (88_000, "88µs"),
        (88_100, "88.1µs"),
        (88_001, "88.001µs"),
        (88_123, "88.123µs"),
        (999_000, "999µs"),
        (999_100, "999.1µs"),
        (999_001, "999.001µs"),
        (999_123, "999.123µs"),
    ];
    for &(nanos, want) in cases {
        assert_eq!(format!("{:?}", Duration::new(0, nanos)), want);
    }
}
+
#[test]
fn debug_formatting_nanos() {
    // Nanosecond range (< 1 µs): unit is `ns`, no fraction possible.
    let cases: &[(u32, &str)] = &[(0, "0ns"), (1, "1ns"), (88, "88ns"), (999, "999ns")];
    for &(nanos, want) in cases {
        assert_eq!(format!("{:?}", Duration::new(0, nanos)), want);
    }
}
+
#[test]
fn debug_formatting_precision_zero() {
    // `.0` precision drops the fraction and rounds half away from zero.
    let cases: &[(Duration, &str)] = &[
        (Duration::new(0, 0), "0ns"),
        (Duration::new(0, 123), "123ns"),
        (Duration::new(0, 1_001), "1µs"),
        (Duration::new(0, 1_499), "1µs"),
        (Duration::new(0, 1_500), "2µs"),
        (Duration::new(0, 1_999), "2µs"),
        (Duration::new(0, 1_000_001), "1ms"),
        (Duration::new(0, 1_499_999), "1ms"),
        (Duration::new(0, 1_500_000), "2ms"),
        (Duration::new(0, 1_999_999), "2ms"),
        (Duration::new(1, 1), "1s"),
        (Duration::new(1, 499_999_999), "1s"),
        (Duration::new(1, 500_000_000), "2s"),
        (Duration::new(1, 999_999_999), "2s"),
    ];
    for &(dur, want) in cases {
        assert_eq!(format!("{:.0?}", dur), want);
    }
}
+
#[test]
fn debug_formatting_precision_two() {
    // `.2` precision always prints two fractional digits, rounded.
    let cases: &[(Duration, &str)] = &[
        (Duration::new(0, 0), "0.00ns"),
        (Duration::new(0, 123), "123.00ns"),
        (Duration::new(0, 1_000), "1.00µs"),
        (Duration::new(0, 7_001), "7.00µs"),
        (Duration::new(0, 7_100), "7.10µs"),
        (Duration::new(0, 7_109), "7.11µs"),
        (Duration::new(0, 7_199), "7.20µs"),
        (Duration::new(0, 1_999), "2.00µs"),
        (Duration::new(0, 1_000_000), "1.00ms"),
        (Duration::new(0, 3_001_000), "3.00ms"),
        (Duration::new(0, 3_100_000), "3.10ms"),
        (Duration::new(0, 1_999_999), "2.00ms"),
        (Duration::new(1, 0), "1.00s"),
        (Duration::new(4, 1_000_000), "4.00s"),
        (Duration::new(2, 100_000_000), "2.10s"),
        (Duration::new(2, 104_990_000), "2.10s"),
        (Duration::new(2, 105_000_000), "2.11s"),
        (Duration::new(8, 999_999_999), "9.00s"),
    ];
    for &(dur, want) in cases {
        assert_eq!(format!("{:.2?}", dur), want);
    }
}
+
#[test]
fn debug_formatting_precision_high() {
    // A precision wider than the stored fraction pads with zeros rather
    // than inventing digits.
    let padded = format!("{:.5?}", Duration::new(0, 23_678));
    assert_eq!(padded, "23.67800µs");

    assert_eq!(format!("{:.9?}", Duration::new(1, 0)), "1.000000000s");
    assert_eq!(format!("{:.10?}", Duration::new(4, 1_000_000)), "4.0010000000s");
    assert_eq!(format!("{:.20?}", Duration::new(4, 1_000_000)), "4.00100000000000000000s");
}
+
#[test]
fn duration_const() {
    // Every inspected `Duration` method must be usable in a const context;
    // the runtime asserts double-check the const-evaluated values.

    const D: Duration = Duration::new(0, 123_456_789);

    // Sub-second accessors.
    const SUBSEC_MS: u32 = D.subsec_millis();
    assert_eq!(SUBSEC_MS, 123);

    const SUBSEC_US: u32 = D.subsec_micros();
    assert_eq!(SUBSEC_US, 123_456);

    const SUBSEC_NS: u32 = D.subsec_nanos();
    assert_eq!(SUBSEC_NS, 123_456_789);

    const ZERO_IS_ZERO: bool = Duration::ZERO.is_zero();
    assert!(ZERO_IS_ZERO);

    // Whole-unit accessors and the matching constructors round-trip.
    const ONE_SEC: u64 = Duration::SECOND.as_secs();
    assert_eq!(ONE_SEC, 1);

    const SEC_FROM_SECS: Duration = Duration::from_secs(1);
    assert_eq!(SEC_FROM_SECS, Duration::SECOND);

    const ONE_SEC_F32: f32 = Duration::SECOND.as_secs_f32();
    assert_eq!(ONE_SEC_F32, 1.0);

    const SEC_FROM_F32: Duration = Duration::from_secs_f32(1.0);
    assert_eq!(SEC_FROM_F32, Duration::SECOND);

    const ONE_SEC_F64: f64 = Duration::SECOND.as_secs_f64();
    assert_eq!(ONE_SEC_F64, 1.0);

    const SEC_FROM_F64: Duration = Duration::from_secs_f64(1.0);
    assert_eq!(SEC_FROM_F64, Duration::SECOND);

    const ONE_SEC_MS: u128 = Duration::SECOND.as_millis();
    assert_eq!(ONE_SEC_MS, 1_000);

    const SEC_FROM_MS: Duration = Duration::from_millis(1_000);
    assert_eq!(SEC_FROM_MS, Duration::SECOND);

    const ONE_SEC_US: u128 = Duration::SECOND.as_micros();
    assert_eq!(ONE_SEC_US, 1_000_000);

    const SEC_FROM_US: Duration = Duration::from_micros(1_000_000);
    assert_eq!(SEC_FROM_US, Duration::SECOND);

    const ONE_SEC_NS: u128 = Duration::SECOND.as_nanos();
    assert_eq!(ONE_SEC_NS, 1_000_000_000);

    const SEC_FROM_NS: Duration = Duration::from_nanos(1_000_000_000);
    assert_eq!(SEC_FROM_NS, Duration::SECOND);

    const MAX_DUR: Duration = Duration::new(u64::MAX, 999_999_999);

    // Checked arithmetic at the limits.
    const ADD_OVERFLOW: Option<Duration> = MAX_DUR.checked_add(Duration::SECOND);
    assert_eq!(ADD_OVERFLOW, None);

    const SUB_UNDERFLOW: Option<Duration> = Duration::ZERO.checked_sub(Duration::SECOND);
    assert_eq!(SUB_UNDERFLOW, None);

    const MUL_BY_ONE: Option<Duration> = Duration::SECOND.checked_mul(1);
    assert_eq!(MUL_BY_ONE, Some(Duration::SECOND));

    const SEC_MUL_F32: Duration = Duration::SECOND.mul_f32(1.0);
    assert_eq!(SEC_MUL_F32, Duration::SECOND);

    const SEC_MUL_F64: Duration = Duration::SECOND.mul_f64(1.0);
    assert_eq!(SEC_MUL_F64, Duration::SECOND);

    const DIV_BY_ONE: Option<Duration> = Duration::SECOND.checked_div(1);
    assert_eq!(DIV_BY_ONE, Some(Duration::SECOND));

    const SEC_DIV_F32: Duration = Duration::SECOND.div_f32(1.0);
    assert_eq!(SEC_DIV_F32, Duration::SECOND);

    const SEC_DIV_F64: Duration = Duration::SECOND.div_f64(1.0);
    assert_eq!(SEC_DIV_F64, Duration::SECOND);

    const RATIO_F32: f32 = Duration::SECOND.div_duration_f32(Duration::SECOND);
    assert_eq!(RATIO_F32, 1.0);

    const RATIO_F64: f64 = Duration::SECOND.div_duration_f64(Duration::SECOND);
    assert_eq!(RATIO_F64, 1.0);

    // Saturating arithmetic at the limits.
    const ADD_SATURATED: Duration = MAX_DUR.saturating_add(Duration::SECOND);
    assert_eq!(ADD_SATURATED, MAX_DUR);

    const SUB_SATURATED: Duration = Duration::ZERO.saturating_sub(Duration::SECOND);
    assert_eq!(SUB_SATURATED, Duration::ZERO);

    const MUL_SATURATED: Duration = MAX_DUR.saturating_mul(2);
    assert_eq!(MUL_SATURATED, MAX_DUR);
}
--- /dev/null
+use std::cmp::Ordering::{Equal, Greater, Less};
+
#[test]
fn test_clone() {
    // A cloned tuple compares equal to its source.
    let original = (1, "2");
    let copy = original.clone();
    assert_eq!(copy, original);
}
+
#[test]
fn test_partial_eq() {
    let lo = (1, 2, 3);
    let hi = (3, 2, 1);
    // Equality is reflexive on each value and symmetric across them.
    assert_eq!(lo, lo);
    assert_eq!(hi, hi);
    assert_ne!(lo, hi);
    assert_ne!(hi, lo);
}
+
#[test]
fn test_partial_ord() {
    let lo = (1, 2, 3);
    let hi = (3, 2, 1);

    // Strict ordering: irreflexive and antisymmetric.
    assert!(lo < hi);
    assert!(!(lo < lo));
    assert!(!(hi < lo));
    assert!(!(hi < hi));

    // Non-strict ordering is reflexive.
    assert!(lo <= lo);
    assert!(hi <= hi);

    assert!(hi > lo);
    assert!(lo >= lo);
    assert!(hi >= lo);
    assert!(hi >= hi);

    // Comparison is lexicographic: a NaN in the first slot makes every
    // comparison against it come out false...
    assert!(!((1.0f64, 2.0f64) < (f64::NAN, 3.0)));
    assert!(!((1.0f64, 2.0f64) <= (f64::NAN, 3.0)));
    assert!(!((1.0f64, 2.0f64) > (f64::NAN, 3.0)));
    assert!(!((1.0f64, 2.0f64) >= (f64::NAN, 3.0)));
    // ...while a NaN in a later slot is skipped when an earlier element
    // already decides the order, and compares false when it is reached.
    assert!((1.0f64, 2.0f64) < (2.0, f64::NAN));
    assert!(!((2.0f64, 2.0f64) < (2.0, f64::NAN)));
}
+
#[test]
fn test_ord() {
    let lo = (1, 2, 3);
    let hi = (3, 2, 1);
    // The total ordering mirrors the partial one for integer tuples.
    assert_eq!(Equal, lo.cmp(&lo));
    assert_eq!(Equal, hi.cmp(&hi));
    assert_eq!(Less, lo.cmp(&hi));
    assert_eq!(Greater, hi.cmp(&lo));
}
+
#[test]
fn test_show() {
    // `Debug` keeps the trailing comma for 1-tuples only.
    assert_eq!(format!("{:?}", (1,)), "(1,)");
    assert_eq!(format!("{:?}", (1, true)), "(1, true)");
    assert_eq!(format!("{:?}", (1, "hi", true)), "(1, \"hi\", true)");
}
--- /dev/null
+LIBCORE_TAG=1.49.0