-C Fix\stest\scode\srelated\sto\sreporting\sthe\ssize\sof\soverflow\spages\sin\szipvfs\sdatabases.
-D 2011-10-05T17:36:27.323
+C Update\sthe\ssqlite3_analyzer\sutility\sprogram\sto\sprovide\smore\sdetails\sabout\nthe\scompression\sperformance\sof\sZIPVFS\sdatabase\sfiles.
+D 2011-10-05T18:18:13.395
F Makefile.arm-wince-mingw32ce-gcc d6df77f1f48d690bd73162294bbba7f59507c72f
F Makefile.in a162fe39e249b8ed4a65ee947c30152786cfe897
F Makefile.linux-gcc 91d710bdc4998cb015f39edf3cb314ec4f4d7e23
F tool/showwal.c f09e5a80a293919290ec85a6a37c85a5ddcf37d9
F tool/soak1.tcl 8d407956e1a45b485a8e072470a3e629a27037fe
F tool/space_used.tcl f714c41a59e326b8b9042f415b628b561bafa06b
-F tool/spaceanal.tcl 58d357384760020443c41d01db612c9809c75de7
+F tool/spaceanal.tcl 7ba8b9784fe7c4fb89d3d9ca012d41c9d74b3c95
F tool/speedtest.tcl 06c76698485ccf597b9e7dbb1ac70706eb873355
F tool/speedtest16.c c8a9c793df96db7e4933f0852abb7a03d48f2e81
F tool/speedtest2.tcl ee2149167303ba8e95af97873c575c3e0fab58ff
F tool/tostr.awk e75472c2f98dd76e06b8c9c1367f4ab07e122d06
F tool/vdbe-compress.tcl d70ea6d8a19e3571d7ab8c9b75cba86d1173ff0f
F tool/warnings.sh fbc018d67fd7395f440c28f33ef0f94420226381
-P 328cc1867ffbbf1c953dfd843649f5f209c8e6ec
-R 9966c1a1728c681681f9fe1b1d6773fc
-U dan
-Z 69c3b890ba0b067ca386e3a142264668
+P ad7c9eed8bbd607babce4f5965f587c873e7bc02
+R e170bea645862be3dfb0e179487437ed
+U drh
+Z 9854118ea63abc101ac650ae8a3b25fa
#
if {[catch {
-if {![info exists argv0]} {
- set argv0 [file rootname [file tail [info nameofexecutable]]]
-}
-
# Get the name of the database to analyze
#
-#set argv $argv0
-if {![info exists argv] || [llength $argv]!=1} {
+proc usage {} {
+ set argv0 [file rootname [file tail [info nameofexecutable]]]
puts stderr "Usage: $argv0 database-name"
exit 1
}
-set file_to_analyze [lindex $argv 0]
+set file_to_analyze {}
+set flags(-pageinfo) 0
+append argv {}
+foreach arg $argv {
+ if {[regexp {^-+pageinfo$} $arg]} {
+ set flags(-pageinfo) 1
+ } elseif {[regexp {^-} $arg]} {
+ puts stderr "Unknown option: $arg"
+ usage
+ } elseif {$file_to_analyze!=""} {
+ usage
+ } else {
+ set file_to_analyze $arg
+ }
+}
+if {$file_to_analyze==""} usage
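# Illustrative invocations of the argument handling above (the database file
# name is assumed; the check-in comment names the host program as
# sqlite3_analyzer):
#
#   sqlite3_analyzer mydb.db              -- normal space-usage report
#   sqlite3_analyzer -pageinfo mydb.db    -- also sets flags(-pageinfo)
#   sqlite3_analyzer --pageinfo mydb.db   -- extra leading dashes are accepted
#   sqlite3_analyzer -bogus mydb.db       -- "Unknown option: -bogus", then usage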
if {![file exists $file_to_analyze]} {
  puts stderr "No such file: $file_to_analyze"
  exit 1
}
if {![file readable $file_to_analyze]} {
  puts stderr "File is not readable: $file_to_analyze"
  exit 1
}
-if {[file size $file_to_analyze]<512} {
+set true_file_size [file size $file_to_analyze]
+if {$true_file_size<512} {
puts stderr "Empty or malformed database: $file_to_analyze"
exit 1
}
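# $true_file_size holds the actual size of the database file on disk.  For a
# ZIPVFS database it is later compared against the size of the uncompressed
# content ($file_bytes) when reporting compression efficiency.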
db func isinternal isinternal
db func isoverflow isoverflow
+set isCompressed 0
+set compressOverhead 0
set sql { SELECT name, tbl_name FROM sqlite_master WHERE rootpage>0 }
foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
FROM temp.dbstat WHERE name = $name
} break
+ set total_pages [expr {$leaf_pages+$int_pages+$ovfl_pages}]
+ set storage [expr {$total_pages*$pageSize}]
+ if {!$isCompressed && $storage>$compressed_size} {
+ set isCompressed 1
+ set compressOverhead 14
+ }
+
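  # A rough sketch of the test above, using illustrative numbers only: with a
  # 1024-byte page size, a table spanning 100 pages holds 102400 bytes of
  # uncompressed content.  If temp.dbstat reports a smaller compressed_size
  # for those pages, the file is treated as compressed (ZIPVFS) and a fixed
  # 14-byte per-page overhead is added back into each compressed-size figure
  # reported below.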
# Column 'gap_cnt' is set to the number of non-contiguous entries in the
# list of pages visited if the b-tree structure is traversed in a top-down
# fashion (each node visited before its child-tree is passed). Any overflow
# the $where clause determines which subset to analyze.
#
proc subreport {title where} {
- global pageSize file_pgcnt
+ global pageSize file_pgcnt compressOverhead
# Query the in-memory database for the sum of various statistics
# for the subset of tables/indices identified by the WHERE clause in
statline {Number of entries} $nleaf
statline {Bytes of storage consumed} $storage
if {$compressed_size!=$storage} {
+ set compressed_size [expr {$compressed_size+$compressOverhead*$total_pages}]
set pct [expr {$compressed_size*100.0/$storage}]
set pct [format {%5.1f%%} $pct]
statline {Bytes used after compression} $compressed_size $pct
statline {Number of indices} $nindex
statline {Number of named indices} $nmanindex
statline {Automatically generated indices} $nautoindex
-statline {Size of the file in bytes} $file_bytes
+if {$isCompressed} {
+ statline {Size of uncompressed content in bytes} $file_bytes
+ set efficiency [percent $true_file_size $file_bytes]
+ statline {Size of compressed file on disk} $true_file_size $efficiency
+} else {
+ statline {Size of the file in bytes} $file_bytes
+}
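# For example (figures are hypothetical): if the database contains 1000000
# bytes of uncompressed content but the ZIPVFS file occupies only 400000
# bytes on disk, the second line above reports the 400000-byte figure with a
# compression efficiency of about 40%.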
statline {Bytes of user payload stored} $user_payload $user_percent
# Output table rankings
FROM space_used GROUP BY tblname ORDER BY size+0 DESC, tblname} {} {
statline [string toupper $tblname] $size [percent $size $file_pgcnt]
}
+if {$isCompressed} {
+ puts ""
+ puts "*** Bytes of disk space used after compression ***********************"
+ puts ""
+ set csum 0
+ mem eval {SELECT tblname,
+ int(sum(compressed_size)) +
+ $compressOverhead*sum(int_pages+leaf_pages+ovfl_pages)
+ AS csize
+ FROM space_used GROUP BY tblname ORDER BY csize+0 DESC, tblname} {} {
+ incr csum $csize
+ statline [string toupper $tblname] $csize [percent $csize $true_file_size]
+ }
+ set overhead [expr {$true_file_size - $csum}]
+ if {$overhead>0} {
+ statline {Header and free space} $overhead [percent $overhead $true_file_size]
+ }
+}
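# In the per-table breakdown above, each table's figure is the sum of its
# compressed page images as recorded in the space_used table plus
# $compressOverhead (14) bytes for every page of the table; whatever part of
# the true file size is left over is reported as "Header and free space".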
# Output subreports
#