git.ipfire.org Git - thirdparty/gcc.git/commitdiff
nr2.0: Fix some paths in test files
author: Owen Avery <powerboat9.gamer@gmail.com>
Wed, 14 May 2025 22:22:33 +0000 (18:22 -0400)
committer: CohenArthur <arthur.cohen@embecosm.com>
Fri, 23 May 2025 14:46:49 +0000 (14:46 +0000)
This is similar to 9faba024ef18b9c4d67f22bd3b020b5e445fad0a, but it
applies to execute tests.

gcc/testsuite/ChangeLog:

* rust/execute/torture/for-loop1.rs: Adjust paths.
* rust/execute/torture/for-loop2.rs: Likewise.
* rust/execute/torture/iter1.rs: Likewise.

Signed-off-by: Owen Avery <powerboat9.gamer@gmail.com>
gcc/testsuite/rust/execute/torture/for-loop1.rs
gcc/testsuite/rust/execute/torture/for-loop2.rs
gcc/testsuite/rust/execute/torture/iter1.rs

index 5a6a70c37d64b2f249c84e130aaecc936c573c41..334218927b1365afe16e04cf96b1ba8c511a6c73 100644 (file)
@@ -102,30 +102,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -133,12 +133,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -146,7 +146,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -155,31 +155,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -194,7 +194,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -204,7 +204,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
index 5ba2cd1351a63b1e8580951aaf7ceea749e2f688..4f5dfe1a0a2602cf21c2b1f632916db87051f75f 100644 (file)
@@ -101,30 +101,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -132,12 +132,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -145,7 +145,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -154,31 +154,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -193,7 +193,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -203,7 +203,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }
 
index c3b6c7bc3f899530f6b487fbd19a6d7bb4536fbc..233eb60383b386fdef90807fcc0dee05cd22fe18 100644 (file)
@@ -99,30 +99,30 @@ mod ptr {
     #[lang = "const_ptr"]
     impl<T> *const T {
         pub unsafe fn offset(self, count: isize) -> *const T {
-            intrinsics::offset(self, count)
+            crate::intrinsics::offset(self, count)
         }
     }
 
     #[lang = "mut_ptr"]
     impl<T> *mut T {
         pub unsafe fn offset(self, count: isize) -> *mut T {
-            intrinsics::offset(self, count) as *mut T
+            crate::intrinsics::offset(self, count) as *mut T
         }
     }
 
     pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
         let x = x as *mut u8;
         let y = y as *mut u8;
-        let len = mem::size_of::<T>() * count;
+        let len = crate::mem::size_of::<T>() * count;
         swap_nonoverlapping_bytes(x, y, len)
     }
 
     pub unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
         // For types smaller than the block optimization below,
         // just swap directly to avoid pessimizing codegen.
-        if mem::size_of::<T>() < 32 {
+        if crate::mem::size_of::<T>() < 32 {
             let z = read(x);
-            intrinsics::copy_nonoverlapping(y, x, 1);
+            crate::intrinsics::copy_nonoverlapping(y, x, 1);
             write(y, z);
         } else {
             swap_nonoverlapping(x, y, 1);
@@ -130,12 +130,12 @@ mod ptr {
     }
 
     pub unsafe fn write<T>(dst: *mut T, src: T) {
-        intrinsics::move_val_init(&mut *dst, src)
+        crate::intrinsics::move_val_init(&mut *dst, src)
     }
 
     pub unsafe fn read<T>(src: *const T) -> T {
-        let mut tmp: T = mem::uninitialized();
-        intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
+        let mut tmp: T = crate::mem::uninitialized();
+        crate::intrinsics::copy_nonoverlapping(src, &mut tmp, 1);
         tmp
     }
 
@@ -143,7 +143,7 @@ mod ptr {
         struct Block(u64, u64, u64, u64);
         struct UnalignedBlock(u64, u64, u64, u64);
 
-        let block_size = mem::size_of::<Block>();
+        let block_size = crate::mem::size_of::<Block>();
 
         // Loop through x & y, copying them `Block` at a time
         // The optimizer should unroll the loop fully for most types
@@ -152,31 +152,31 @@ mod ptr {
         while i + block_size <= len {
             // Create some uninitialized memory as scratch space
             // Declaring `t` here avoids aligning the stack when this loop is unused
-            let mut t: Block = mem::uninitialized();
+            let mut t: Block = crate::mem::uninitialized();
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
             // Swap a block of bytes of x & y, using t as a temporary buffer
             // This should be optimized into efficient SIMD operations where available
-            intrinsics::copy_nonoverlapping(x, t, block_size);
-            intrinsics::copy_nonoverlapping(y, x, block_size);
-            intrinsics::copy_nonoverlapping(t, y, block_size);
+            crate::intrinsics::copy_nonoverlapping(x, t, block_size);
+            crate::intrinsics::copy_nonoverlapping(y, x, block_size);
+            crate::intrinsics::copy_nonoverlapping(t, y, block_size);
             i += block_size;
         }
 
         if i < len {
             // Swap any remaining bytes
-            let mut t: UnalignedBlock = mem::uninitialized();
+            let mut t: UnalignedBlock = crate::mem::uninitialized();
             let rem = len - i;
 
             let t = &mut t as *mut _ as *mut u8;
             let x = x.offset(i as isize);
             let y = y.offset(i as isize);
 
-            intrinsics::copy_nonoverlapping(x, t, rem);
-            intrinsics::copy_nonoverlapping(y, x, rem);
-            intrinsics::copy_nonoverlapping(t, y, rem);
+            crate::intrinsics::copy_nonoverlapping(x, t, rem);
+            crate::intrinsics::copy_nonoverlapping(y, x, rem);
+            crate::intrinsics::copy_nonoverlapping(t, y, rem);
         }
     }
 }
@@ -191,7 +191,7 @@ mod mem {
 
     pub fn swap<T>(x: &mut T, y: &mut T) {
         unsafe {
-            ptr::swap_nonoverlapping_one(x, y);
+            crate::ptr::swap_nonoverlapping_one(x, y);
         }
     }
 
@@ -201,7 +201,7 @@ mod mem {
     }
 
     pub unsafe fn uninitialized<T>() -> T {
-        intrinsics::uninit()
+        crate::intrinsics::uninit()
     }
 }