# Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2004, 2005,
# 2007, 2008 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)

if $tracelevel then {
    strace $tracelevel
}

set prms_id 0
set bug_id 0

# Are we on a target board?  As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.

if ![isnative] then {
    untested "Remote system"
    return
}

# Can the system run this test (in particular, does it support sparse
# corefiles)?  On systems that lack sparse corefile support this test
# consumes too many resources - gigabytes worth of disk space and
# I/O bandwidth.

if { [istarget "*-*-*bsd*"]
     || [istarget "*-*-hpux*"]
     || [istarget "*-*-solaris*"]
     || [istarget "*-*-cygwin*"] } {
    untested "Kernel lacks sparse corefile support (PR gdb/1551)"
    return
}

# This testcase causes too much stress (in terms of memory usage)
# on certain systems...
if { [istarget "*-*-*irix*"] } {
    untested "Testcase too stressful for this system"
    return
}

set testfile "bigcore"
set srcfile ${testfile}.c
set binfile ${objdir}/${subdir}/${testfile}
set corefile ${objdir}/${subdir}/${testfile}.corefile

if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
    untested bigcore.exp
    return -1
}

# Run GDB on the bigcore program up to the point where it will dump core.

gdb_exit
gdb_start
gdb_reinitialize_dir $srcdir/$subdir
gdb_load ${binfile}
gdb_test "set print sevenbit-strings" "" \
    "set print sevenbit-strings; ${testfile}"
gdb_test "set width 0" "" \
    "set width 0; ${testfile}"
if { ![runto_main] } then {
    gdb_suppress_tests;
}
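
# Set a temporary breakpoint on the bigcore.c line tagged "Dump core",
# continue to it, and then step one more statement before forcing the
# core dump below.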
set print_core_line [gdb_get_line_number "Dump core"]
gdb_test "tbreak $print_core_line"
gdb_test continue ".*print_string.*"
gdb_test next ".*0 = 0.*"

# Traverse part of bigcore's linked list of memory chunks (forward or
# backward), saving each chunk's address.

proc extract_heap { dir } {
    global gdb_prompt
    global expect_out
    set heap ""
    set test "extract ${dir} heap"
    set lim 0
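    # Print the first element, then repeatedly print the ${dir} pointer
    # of the value just printed.  Collect each non-NULL address, stopping
    # at a NULL pointer or after 50 entries.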
    gdb_test_multiple "print heap.${dir}" "$test" {
        -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
            pass "$test"
        }
        -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
            set heap [concat $heap $expect_out(1,string)]
            if { $lim >= 50 } {
                pass "$test (stop at $lim)"
            } else {
                incr lim
                send_gdb "print \$.${dir}\n"
                exp_continue
            }
        }
        -re ".*$gdb_prompt $" {
            fail "$test (entry $lim)"
        }
        timeout {
            fail "$test (timeout)"
        }
    }
    return $heap;
}
set next_heap [extract_heap next]
set prev_heap [extract_heap prev]

# Save the total allocated size within GDB so that we can check
# the core size later.
gdb_test "set \$bytes_allocated = bytes_allocated" "" "save heap size"

# Now create a core dump.

# Rename the core file to "TESTFILE.corefile" rather than just "core",
# to avoid problems with sys admin types that like to regularly prune
# all files named "core" from the system.

# Some systems append "core" to the name of the program; others append
# the name of the program to "core"; still others (like Linux, as of
# May 2003) create cores named "core.PID".

# Save the process ID.  Some systems dump the core into core.PID.
set test "grab pid"
gdb_test_multiple "info program" $test {
    -re "child process (\[0-9\]+).*$gdb_prompt $" {
        set inferior_pid $expect_out(1,string)
        pass $test
    }
    -re "$gdb_prompt $" {
        set inferior_pid unknown
        pass $test
    }
}

# Dump core using SIGABRT.
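# Dumping a core this large can take a while, even when it is sparse,
# so give GDB a generous timeout.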
set oldtimeout $timeout
set timeout 600
gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"

# Find the corefile.
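# Try each of the naming conventions described above and rename the
# first match to ${testfile}.corefile.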
set file ""
foreach pat [list core.${inferior_pid} ${testfile}.core core] {
    set names [glob -nocomplain $pat]
    if {[llength $names] == 1} {
        set file [lindex $names 0]
        remote_exec build "mv $file $corefile"
        break
    }
}

if { $file == "" } {
    untested "Can't generate a core file"
    return 0
}

# Check that the corefile is plausibly large enough.  We're trying to
# detect the case where the operating system has truncated the file
# just before signed wraparound.  TCL, unfortunately, has a similar
# problem - so use catch.  It can handle the "bad" size but not
# necessarily the "good" one.  And we must use GDB for the comparison,
# similarly.

if {[catch {file size $corefile} core_size] == 0} {
    set core_ok 0
    gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
        -re " = 1\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 1
        }
        -re " = 0\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 0
        }
    }
} else {
    # Probably failed due to the TCL build having problems with very
    # large values.  Since GDB uses a 64-bit off_t (when possible) it
    # shouldn't have this problem.  Assume that things are going to
    # work.  Without this assumption the test is skipped on systems
    # (such as i386 GNU/Linux with patched kernel) which do pass.
    pass "check core size"
    set core_ok 1
}
if {! $core_ok} {
    untested "check core size (system does not support large corefiles)"
    return 0
}

# Now load up that core file.

set test "load corefile"
gdb_test_multiple "core $corefile" "$test" {
    -re "A program is being debugged already.  Kill it. .y or n. " {
        send_gdb "y\n"
        exp_continue
    }
    -re "Core was generated by.*$gdb_prompt $" {
        pass "$test"
    }
}

# Finally, re-traverse bigcore's linked list, checking each chunk's
# address against the executable.  Don't use gdb_test_multiple as we
# want only one pass/fail.  Don't use exp_continue as the regular
# expression involving $heap needs to be re-evaluated for each new
# response.

proc check_heap { dir heap } {
    global gdb_prompt
    set test "check ${dir} heap"
    set ok 1
    set lim 0
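    # Kick off the traversal with an initial print, then step through
    # the saved addresses one "print $.${dir}" at a time; stop once
    # every address has been matched, or on the first mismatch or
    # timeout.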
    send_gdb "print heap.${dir}\n"
    while { $ok } {
        gdb_expect {
            -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
                if { $lim >= [llength $heap] } {
                    pass "$test"
                    set ok 0
                } else {
                    incr lim
                    send_gdb "print \$.${dir}\n"
                }
            }
            -re ".*$gdb_prompt $" {
                fail "$test (address [lindex $heap $lim])"
                set ok 0
            }
            timeout {
                fail "$test (timeout)"
                set ok 0
            }
        }
    }
}

check_heap next $next_heap
check_heap prev $prev_heap