/* This testcase is part of GDB, the GNU debugger.

   Copyright 2012-2023 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* Globals used for vector tests.

   vec_xa and vec_xb feed the "wa" (VSX register) operands of the inline
   asm in main; vec_xt is only initialized here -- presumably its value is
   inspected by the companion .exp script, TODO confirm.  ra, rb and rs
   hold scalar values fed to the "r" (GPR) asm operands; rs is set but not
   otherwise used in this file.  The "vector" keyword is the PowerPC
   AltiVec/VSX extension, so this file builds only for a Power target.  */
static vector unsigned long vec_xa, vec_xb, vec_xt;
static unsigned long ra, rb, rs;

int
main ()
{

  /* This test is used to verify the recording of the MMA instructions.  The
     names of the MMA instructions pmxbf16ger*, pmxvf32ger*, pmxvf64ger*,
     pmxvi4ger8*, pmxvi8ger4*, pmxvi16ger2* instructions were officially
     changed to pmdmxbf16ger*, pmdmxvf32ger*, pmdmxvf64ger*, pmdmxvi4ger8*,
     pmdmxvi8ger4*, pmdmxvi16ger* respectively.  The old mnemonics are used
     in this test for backward compatibility.  */

  /* NOTE(review): the "/" "* stop N *" "/" comments below are presumably
     matched by the companion .exp script to place breakpoints -- keep them
     on the same statements they annotate.  TODO confirm against
     ppc_record_test_isa_3_1.exp.  */
  ra = 0xABCDEF012;
  rb = 0;
  rs = 0x012345678;

  /* 9.0, 16.0, 25.0, 36.0 -- four 32-bit floats packed as two 64-bit
     vector elements.  */
  vec_xb = (vector unsigned long){0x4110000041800000, 0x41c8000042100000};

  vec_xt = (vector unsigned long){0xFF00FF00FF00FF00, 0xAA00AA00AA00AA00};

  /* Test 1, ISA 3.1 word instructions.  Load source into r1, result of brh
     (byte-reverse halfword) put in r0.  */
  ra = 0xABCDEF012; /* stop 1 */
  __asm__ __volatile__ ("pld 1, %0" :: "r" (ra ));
  __asm__ __volatile__ ("brh 0, 1" );
  ra = 0; /* stop 2 */

  /* Test 2, ISA 3.1 MMA instructions with results in various ACC entries
     xxsetaccz    - ACC[3]
     xvi4ger8     - ACC[4]
     xvf16ger2pn  - ACC[5]
     pmxvi8ger4   - ACC[6]
     pmxvf32gerpp - ACC[7] and fpscr */
  /* Need to initialize the vs registers to a non zero value.  Each lxvd2x
     loads the current contents of vec_xb (address ra, index rb == 0) into
     one of VSRs 12..31; vec_xb is rewritten between loads so the registers
     receive distinct values.  */
  ra = (unsigned long) & vec_xb;
  __asm__ __volatile__ ("lxvd2x 12, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 13, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 14, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 15, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xa = (vector unsigned long){0x333134343987601, 0x9994bbbc9983307};
  vec_xb = (vector unsigned long){0x411234041898760, 0x41c833042103400};
  __asm__ __volatile__ ("lxvd2x 16, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x123456789987650, 0x235676546989807};
  __asm__ __volatile__ ("lxvd2x 17, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x878363439823470, 0x413434c99839870};
  __asm__ __volatile__ ("lxvd2x 18, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x043765434398760, 0x419876555558850};
  __asm__ __volatile__ ("lxvd2x 19, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x33313434398760, 0x9994bbbc99899330};
  __asm__ __volatile__ ("lxvd2x 20, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 21, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 22, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 23, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 24, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 25, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 26, %0, %1" :: "r" (ra ), "r" (rb));
  __asm__ __volatile__ ("lxvd2x 27, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xa = (vector unsigned long){0x33313434398760, 0x9994bbbc998330};
  vec_xb = (vector unsigned long){0x4110000041800000, 0x41c8000042100000};
  __asm__ __volatile__ ("lxvd2x 28, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x4567000046800000, 0x4458000048700000};
  __asm__ __volatile__ ("lxvd2x 29, %0, %1" :: "r" (ra ), "r" (rb));
  vec_xb = (vector unsigned long){0x41dd000041e00000, 0x41c8000046544400};
  __asm__ __volatile__ ("lxvd2x 30, %0, %1" :: "r" (ra ), "r" (rb));

  /* SNAN -- a signaling-NaN float pattern so the pmxvf32gerpp below also
     exercises recording of the fpscr exception bits.  */
  vec_xb = (vector unsigned long){0x7F8F00007F8F0000, 0x7F8F00007F8F0000};

  __asm__ __volatile__ ("lxvd2x 31, %0, %1" :: "r" (ra ), "r" (rb));

  ra = 0xAB; /* stop 3 */
  __asm__ __volatile__ ("xxsetaccz 3");
  __asm__ __volatile__ ("xvi4ger8 4, %x0, %x1" :: "wa" (vec_xa), \
			"wa" (vec_xb) );
  __asm__ __volatile__ ("xvf16ger2pn 5, %x0, %x1" :: "wa" (vec_xa),\
			"wa" (vec_xb) );
  /* Use the older instruction name for backward compatibility.  */
  __asm__ __volatile__ ("pmxvi8ger4spp 6, %x0, %x1, 11, 13, 5"
			:: "wa" (vec_xa), "wa" (vec_xb) );
  __asm__ __volatile__ ("pmxvf32gerpp 7, %x0, %x1, 11, 13"
			:: "wa" (vec_xa), "wa" (vec_xb) );
  ra = 0; /* stop 4 */
}