# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004, 2005
# Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

# Please email any bugs, comments, and/or additions to this file to:
# bug-gdb@prep.ai.mit.edu

# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)

if $tracelevel then {
    strace $tracelevel
}

set prms_id 0
set bug_id 0

# Are we on a target board?  As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.

if ![isnative] then {
    untested "Remote system"
    return
}

# Can the system run this test (in particular support sparse
# corefiles)?  On systems that lack sparse corefile support this test
# consumes too many resources - gigabytes worth of disk space and
# I/O bandwidth.

if { [istarget "*-*-*bsd*"]
     || [istarget "*-*-hpux*"]
     || [istarget "*-*-solaris*"]
     || [istarget "*-*-cygwin*"] } {
    untested "Kernel lacks sparse corefile support (PR gdb/1551)"
    return
}

# This testcase causes too much stress (in terms of memory usage)
# on certain systems...
if { [istarget "*-*-*irix*"] } {
    untested "Testcase too stressful for this system"
    return
}

set testfile "bigcore"
set srcfile ${testfile}.c
set binfile ${objdir}/${subdir}/${testfile}
set corefile ${objdir}/${subdir}/${testfile}.corefile

if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
    untested bigcore.exp
    return -1
}

# Run GDB on the bigcore program up-to where it will dump core.

gdb_exit
gdb_start
gdb_reinitialize_dir $srcdir/$subdir
gdb_load ${binfile}
gdb_test "set print sevenbit-strings" "" \
    "set print sevenbit-strings; ${testfile}"
gdb_test "set width 0" "" \
    "set width 0; ${testfile}"
if { ![runto_main] } then {
    gdb_suppress_tests;
}
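
# gdb_get_line_number finds the line in bigcore.c that contains the
# literal string "Dump core"; the temporary breakpoint below runs the
# program up to the point where it is about to dump core.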
set print_core_line [gdb_get_line_number "Dump core"]
gdb_test "tbreak $print_core_line"
gdb_test continue ".*print_string.*"
gdb_test next ".*0 = 0.*"

# Traverse part of bigcore's linked list of memory chunks (forward or
# backward), saving each chunk's address.

proc extract_heap { dir } {
    global gdb_prompt
    global expect_out
    set heap ""
    set test "extract ${dir} heap"
    set lim 0
    gdb_test_multiple "print heap.${dir}" "$test" {
        -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
            pass "$test"
        }
        -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
            set heap [concat $heap $expect_out(1,string)]
            if { $lim >= 50 } {
                pass "$test (stop at $lim)"
            } else {
                incr lim
                send_gdb "print \$.${dir}\n"
                exp_continue
            }
        }
        -re ".*$gdb_prompt $" {
            fail "$test (entry $lim)"
        }
        timeout {
            fail "$test (timeout)"
        }
    }
    return $heap;
}
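
# extract_heap advances by printing "$.next" (or "$.prev"): GDB's "$"
# refers to the most recently printed value, so each print follows one
# more link of the list.  The walk stops at a NULL pointer or once 50
# entries have been collected.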
set next_heap [extract_heap next]
set prev_heap [extract_heap prev]

# Save the total allocated size within GDB so that we can check
# the core size later.
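# The total is kept in a GDB convenience variable rather than read
# back into Tcl; as noted for the size check further below, Tcl may
# not cope with such a large value, so the comparison is done inside
# GDB as well.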
gdb_test "set \$bytes_allocated = bytes_allocated" "" "save heap size"

# Now create a core dump

# Rename the core file to "TESTFILE.corefile" rather than just "core",
# to avoid problems with sys admin types that like to regularly prune
# all files named "core" from the system.

# Some systems append "core" to the name of the program; others append
# the name of the program to "core"; still others (like Linux, as of
# May 2003) create cores named "core.PID".

# Save the process ID.  Some systems dump the core into core.PID.
set test "grab pid"
gdb_test_multiple "info program" $test {
    -re "child process (\[0-9\]+).*$gdb_prompt $" {
        set inferior_pid $expect_out(1,string)
        pass $test
    }
    -re "$gdb_prompt $" {
        set inferior_pid unknown
        pass $test
    }
}

# Dump core using SIGABRT
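# The timeout is raised to ten minutes below, presumably to allow for
# the time it can take to write out a multi-gigabyte (though sparse)
# core file.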
set oldtimeout $timeout
set timeout 600
gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"

# Find the corefile
set file ""
foreach pat [list core.${inferior_pid} ${testfile}.core core] {
    set names [glob -nocomplain $pat]
    if {[llength $names] == 1} {
        set file [lindex $names 0]
        remote_exec build "mv $file $corefile"
        break
    }
}

if { $file == "" } {
    untested "Can't generate a core file"
    return 0
}

# Check that the corefile is plausibly large enough.  We're trying to
# detect the case where the operating system has truncated the file
# just before signed wraparound.  TCL, unfortunately, has a similar
# problem - so use catch.  It can handle the "bad" size but not
# necessarily the "good" one.  And we must use GDB for the comparison,
# similarly.

if {[catch {file size $corefile} core_size] == 0} {
    set core_ok 0
    gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
        -re " = 1\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 1
        }
        -re " = 0\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 0
        }
    }
} {
    # Probably failed due to the TCL build having problems with very
    # large values.  Since GDB uses a 64-bit off_t (when possible) it
    # shouldn't have this problem.  Assume that things are going to
    # work.  Without this assumption the test is skipped on systems
    # (such as i386 GNU/Linux with patched kernel) which do pass.
    pass "check core size"
    set core_ok 1
}
if {! $core_ok} {
    untested "check core size (system does not support large corefiles)"
    return 0
}

# Now load up that core file

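# GDB may first ask whether the program it is still debugging should
# be killed; the first pattern below answers "y" so that the core file
# replaces the live inferior.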
set test "load corefile"
gdb_test_multiple "core $corefile" "$test" {
    -re "A program is being debugged already.  Kill it. .y or n. " {
        send_gdb "y\n"
        exp_continue
    }
    -re "Core was generated by.*$gdb_prompt $" {
        pass "$test"
    }
}

# Finally, re-traverse bigcore's linked list, checking each chunk's
# address against the executable.  Don't use gdb_test_multiple as we
# want only one pass/fail.  Don't use exp_continue as the regular
# expression involving $heap needs to be re-evaluated for each new
# response.

proc check_heap { dir heap } {
    global gdb_prompt
    set test "check ${dir} heap"
    set ok 1
    set lim 0
    send_gdb "print heap.${dir}\n"
    while { $ok } {
        gdb_expect {
            -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
                if { $lim >= [llength $heap] } {
                    pass "$test"
                    set ok 0
                } else {
                    incr lim
                    send_gdb "print \$.${dir}\n"
                }
            }
            -re ".*$gdb_prompt $" {
                fail "$test (address [lindex $heap $lim])"
                set ok 0
            }
            timeout {
                fail "$test (timeout)"
                set ok 0
            }
        }
    }
}

check_heap next $next_heap
check_heap prev $prev_heap