Fix testcase module path
author    Pierre-Emmanuel Patry <pierre-emmanuel.patry@embecosm.com>
          Thu, 20 Mar 2025 14:10:07 +0000 (15:10 +0100)
committer P-E-P <32375388+P-E-P@users.noreply.github.com>
          Mon, 24 Mar 2025 13:15:51 +0000 (13:15 +0000)
These tests were inlined from libcore, where a use declaration imports the
referenced modules. That declaration was not carried over, so the
unqualified module paths no longer resolved; use complete paths from the
crate root instead.
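
A minimal sketch of the failure and of both possible fixes (not part of the
commit; module and function names here are illustrative):

mod intrinsics {
    pub fn abort() -> ! {
        panic!("abort")
    }
}

mod mem {
    pub fn die() -> ! {
        // Unqualified, this fails: sibling top-level modules are not
        // implicitly in scope inside another module.
        //     intrinsics::abort();   // error[E0433]: failed to resolve
        //
        // Fix used by this commit: a complete path from the crate root.
        crate::intrinsics::abort()
    }
}

mod mem_with_use {
    // Fix used by libcore itself: a use declaration importing the module.
    use crate::intrinsics;

    pub fn die() -> ! {
        intrinsics::abort()
    }
}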

gcc/testsuite/ChangeLog:

* rust/compile/issue-2330.rs: Use complete path from crate root.
* rust/compile/issue-1901.rs: Likewise.
* rust/compile/issue-1981.rs: Likewise.
* rust/compile/iterators1.rs: Likewise.
* rust/compile/sizeof-stray-infer-var-bug.rs: Likewise.
* rust/compile/for-loop1.rs: Likewise.
* rust/compile/for-loop2.rs: Likewise.
* rust/compile/torture/builtin_abort.rs: Likewise.
* rust/compile/torture/uninit-intrinsic-1.rs: Likewise.

Signed-off-by: Pierre-Emmanuel Patry <pierre-emmanuel.patry@embecosm.com>
gcc/testsuite/rust/compile/for-loop1.rs
gcc/testsuite/rust/compile/for-loop2.rs
gcc/testsuite/rust/compile/issue-1901.rs
gcc/testsuite/rust/compile/issue-1981.rs
gcc/testsuite/rust/compile/issue-2330.rs
gcc/testsuite/rust/compile/iterators1.rs
gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
gcc/testsuite/rust/compile/torture/builtin_abort.rs
gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs

diff --git a/gcc/testsuite/rust/compile/for-loop1.rs b/gcc/testsuite/rust/compile/for-loop1.rs
index 1023ecde1c35ac5ddc0298a7dd539b0c63bb07df..21e0399161b55a74d260f3194d617eebacfa965b 100644
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
                     Self::from_le(Self::from_ne_bytes(bytes))
                 }
 
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/for-loop2.rs b/gcc/testsuite/rust/compile/for-loop2.rs
index d18bddd51dbe3900de074355998fb020ec4b21ef..a0ad06613f510877b88dde951dbf1df0138b6d89 100644
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -214,25 +214,25 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
@@ -243,7 +243,7 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+                pub const fn from_le_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
                     Self::from_le(Self::from_ne_bytes(bytes))
                 }
 
@@ -254,8 +254,8 @@ macro_rules! impl_uint {
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -268,7 +268,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -384,12 +384,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -505,7 +505,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
@@ -536,10 +536,12 @@ impl<I: Iterator> IntoIterator for I {
 
 pub fn main() {
     // make sure we can desugar for-loops inside other blocks
-    
+
     if true {
         for _ in 20usize..40usize {
-            unsafe { puts("loop\0" as *const str as *const i8); }
+            unsafe {
+                puts("loop\0" as *const str as *const i8);
+            }
         }
     }
 }
diff --git a/gcc/testsuite/rust/compile/issue-1901.rs b/gcc/testsuite/rust/compile/issue-1901.rs
index cfd8ef44fcc52bbcbdf74bd65c9bdd808a93c4cb..b43e34f702f11be6ec52c859d97873077d0d242b 100644
@@ -13,14 +13,14 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 }
diff --git a/gcc/testsuite/rust/compile/issue-1981.rs b/gcc/testsuite/rust/compile/issue-1981.rs
index bfd8d2c3417d892b56461f9a0e2f31721d6250d1..de9588c60adaa5e43e7577979ff74762f37a2ca0 100644
@@ -16,30 +16,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -47,12 +47,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -60,7 +60,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -69,31 +69,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -106,7 +106,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -116,7 +116,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -126,7 +126,7 @@ trait Step {
 
 impl Step for i32 {
     fn replace_zero(&mut self) -> Self {
-        mem::replace(self, 0)
+        crate::mem::replace(self, 0)
     }
 }
 
diff --git a/gcc/testsuite/rust/compile/issue-2330.rs b/gcc/testsuite/rust/compile/issue-2330.rs
index 97c15033998f915b11d595d33ae02980004f51d5..6ab46c7c8ef85f63f220d90601f8b5ec74aa4f83 100644
@@ -95,30 +95,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -126,12 +126,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -139,7 +139,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -148,31 +148,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -185,7 +185,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -195,6 +195,6 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
diff --git a/gcc/testsuite/rust/compile/iterators1.rs b/gcc/testsuite/rust/compile/iterators1.rs
index 1141758b14a7b70d9079129cb7bc47842c2fde06..2ea3d741c9f394a11456f78960d0235631775e8a 100644
@@ -98,30 +98,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -129,12 +129,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -142,7 +142,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -151,31 +151,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -190,7 +190,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -200,7 +200,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -210,30 +210,30 @@ macro_rules! impl_uint {
             impl $ty {
                 pub fn wrapping_add(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_add(self, rhs)
+                        crate::intrinsics::wrapping_add(self, rhs)
                     }
                 }
 
                 pub fn wrapping_sub(self, rhs: Self) -> Self {
                     unsafe {
-                        intrinsics::wrapping_sub(self, rhs)
+                        crate::intrinsics::wrapping_sub(self, rhs)
                     }
                 }
 
                 pub fn rotate_left(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_left(self, n as Self)
+                        crate::intrinsics::rotate_left(self, n as Self)
                     }
                 }
 
                 pub fn rotate_right(self, n: u32) -> Self {
                     unsafe {
-                        intrinsics::rotate_right(self, n as Self)
+                        crate::intrinsics::rotate_right(self, n as Self)
                     }
                 }
 
-                pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
-                    unsafe { mem::transmute(bytes) }
+                pub const fn from_ne_bytes(bytes: [u8; crate::mem::size_of::<Self>()]) -> Self {
+                    unsafe { crate::mem::transmute(bytes) }
                 }
 
                 pub fn checked_add(self, rhs: Self) -> Option<Self> {
@@ -246,7 +246,7 @@ macro_rules! impl_uint {
                 }
 
                 pub fn overflowing_add(self, rhs: Self) -> (Self, bool) {
-                    let (a, b) = unsafe { intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
+                    let (a, b) = unsafe { crate::intrinsics::add_with_overflow(self as $ty, rhs as $ty) };
                     (a as Self, b)
                 }
             }
@@ -362,12 +362,12 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         fn replace_one(&mut self) -> Self {
-            mem::replace(self, 1)
+            crate::mem::replace(self, 1)
         }
 
         #[inline]
         fn replace_zero(&mut self) -> Self {
-            mem::replace(self, 0)
+            crate::mem::replace(self, 0)
         }
 
         #[inline]
@@ -482,7 +482,7 @@ impl<A: Step> Iterator for Range<A> {
             // and this won't actually result in an extra check in an optimized build.
             match self.start.add_usize(1) {
                 Option::Some(mut n) => {
-                    mem::swap(&mut n, &mut self.start);
+                    crate::mem::swap(&mut n, &mut self.start);
                     Option::Some(n)
                 }
                 Option::None => Option::None,
diff --git a/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs b/gcc/testsuite/rust/compile/sizeof-stray-infer-var-bug.rs
index 8275691703475c11f2cc522c218b26e7764d7521..c46a97d1539bd34ad569d35ee4e80cb1d23b8c70 100644
@@ -14,6 +14,6 @@ mod ptr {
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut T;
         let y = y as *mut T;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
     }
 }
diff --git a/gcc/testsuite/rust/compile/torture/builtin_abort.rs b/gcc/testsuite/rust/compile/torture/builtin_abort.rs
index 3112cdc67f71f5074e166df00f3b06aa7a9c1b85..919caa4519f9029f47e09e14901e5a8a6aba1558 100644
@@ -12,7 +12,7 @@ mod intrinsics {
     }
 }
 
-pub fn main () -> i32 {
-    abort();
+pub fn main() -> i32 {
+    crate::intrinsics::abort();
     0
 }
diff --git a/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs b/gcc/testsuite/rust/compile/torture/uninit-intrinsic-1.rs
index fa329c694ad5771d6db05713a8db513b4a4c46d1..af1cb541fc3f039618b094175b201ff762da8e44 100644
@@ -11,7 +11,7 @@ mod intrinsics {
 
 mod mem {
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
@@ -21,6 +21,6 @@ struct Foo(i32, i32);
 
 impl Foo {
     pub fn new() -> Self {
-        unsafe { mem::uninitialized::<Foo>() }
+        unsafe { crate::mem::uninitialized::<Foo>() }
     }
 }