$(TOP)/src/test_pcache.c \
$(TOP)/src/test_schema.c \
$(TOP)/src/test_server.c \
+ $(TOP)/src/test_stat.c \
$(TOP)/src/test_tclvar.c \
$(TOP)/src/test_thread.c \
$(TOP)/src/test_vfs.c \
$(TOP)/src/test_pcache.c \
$(TOP)/src/test_schema.c \
$(TOP)/src/test_server.c \
+ $(TOP)/src/test_stat.c \
$(TOP)/src/test_tclvar.c \
$(TOP)/src/test_thread.c \
$(TOP)/src/test_vfs.c \
-C Exclude\sa\sfew\smore\sFTS\smodules\sfrom\sthe\s"in\smemory"\spermutation\stest\ssuite.
-D 2010-07-09T19:32:28
+C Changes\sso\sthat\sthe\sspace-analyzer\sscript\sworks\swith\s3.7.0.
+D 2010-07-12T08:39:38
F Makefile.arm-wince-mingw32ce-gcc fcd5e9cd67fe88836360bb4f9ef4cb7f8e2fb5a0
-F Makefile.in c4270a1cd7cd70a263b7e96a258aa90e9c3618eb
+F Makefile.in 3340503a02ffc70370f8308a484c99330589774d
F Makefile.linux-gcc d53183f4aa6a9192d249731c90dbdffbd2c68654
F Makefile.vxworks 4314cde20a1d9460ec5083526ea975442306ae7e
F README cd04a36fbc7ea56932a4052d7d0b7f09f27c33d6
F ext/rtree/viewrtree.tcl eea6224b3553599ae665b239bd827e182b466024
F install-sh 9d4de14ab9fb0facae2f48780b874848cbf2f895 x
F ltmain.sh 3ff0879076df340d2e23ae905484d8c15d5fdea8
-F main.mk f425173735dc971acf24378a716e6d5df43ca408
+F main.mk 471e9b275ac3177c46ee077bcaba76f74313a13f
F mkdll.sh 7d09b23c05d56532e9d44a50868eb4b12ff4f74a
F mkextu.sh 416f9b7089d80e5590a29692c9d9280a10dbad9f
F mkextw.sh 4123480947681d9b434a5e7b1ee08135abe409ac
F src/sqliteLimit.h 196e2f83c3b444c4548fc1874f52f84fdbda40f3
F src/status.c 4df6fe7dce2d256130b905847c6c60055882bdbe
F src/table.c 2cd62736f845d82200acfa1287e33feb3c15d62e
-F src/tclsqlite.c b898ab058f51ee57e6b54aa8de902526a5376959
+F src/tclsqlite.c 0e47807e6e05269152a0d51348b72c221128723c
F src/test1.c a48320a6481761c46b61ee1c1ee39177add8fccd
F src/test2.c e3f564ab1e9fd0b47b0c9e23e7054e38bf0836cf
F src/test3.c 4c21700c73a890a47fc685c1097bfb661346ac94
F src/test_pcache.c 7bf828972ac0d2403f5cfa4cd14da41f8ebe73d8
F src/test_schema.c 8c06ef9ddb240c7a0fcd31bc221a6a2aade58bf0
F src/test_server.c bbba05c144b5fc4b52ff650a4328027b3fa5fcc6
+F src/test_stat.c 29b4b949834b8c1901b5fd52df49ae36bea594d2
F src/test_tclvar.c f4dc67d5f780707210d6bb0eb6016a431c04c7fa
F src/test_thread.c bedd05cad673dba53326f3aa468cc803038896c0
F src/test_vfs.c bea0f0bdee9b033a62d057bf3451c25760b0414d
F tool/showwal.c f09e5a80a293919290ec85a6a37c85a5ddcf37d9
F tool/soak1.tcl 8d407956e1a45b485a8e072470a3e629a27037fe
F tool/space_used.tcl f714c41a59e326b8b9042f415b628b561bafa06b
-F tool/spaceanal.tcl b87db46ae29e3116411b1686e136b9b994d7de39
+F tool/spaceanal.tcl b91879d52bf77a1ff5382493284f429d32a63490
F tool/speedtest.tcl 06c76698485ccf597b9e7dbb1ac70706eb873355
F tool/speedtest16.c c8a9c793df96db7e4933f0852abb7a03d48f2e81
F tool/speedtest2.tcl ee2149167303ba8e95af97873c575c3e0fab58ff
F tool/speedtest8.c 2902c46588c40b55661e471d7a86e4dd71a18224
F tool/speedtest8inst1.c 293327bc76823f473684d589a8160bde1f52c14e
F tool/vdbe-compress.tcl d70ea6d8a19e3571d7ab8c9b75cba86d1173ff0f
-P 6af6794cac5516f8cbc425cb8c73468ca371a1ad
-R 296bf4b92cd30dce2030d2dd3e08c3d8
-U shaneh
-Z 4214507f2db7672c59426bd44673cfd9
+P 4e9d69cd5ac3cc4d12ccea3028cba73e3c3f2965
+R 43d1a2278532e5bb600b8e2f0d56817e
+U dan
+Z 2a147625d7ac5be213d9b0b32d458d0e
-4e9d69cd5ac3cc4d12ccea3028cba73e3c3f2965
\ No newline at end of file
+86159cb3f00a380dc55be3affb01c433618f0683
\ No newline at end of file
extern int Sqlitetestbackup_Init(Tcl_Interp*);
extern int Sqlitetestintarray_Init(Tcl_Interp*);
extern int Sqlitetestvfs_Init(Tcl_Interp *);
+ extern int SqlitetestStat_Init(Tcl_Interp*);
Sqliteconfig_Init(interp);
Sqlitetest1_Init(interp);
Sqlitetestbackup_Init(interp);
Sqlitetestintarray_Init(interp);
Sqlitetestvfs_Init(interp);
+ SqlitetestStat_Init(interp);
Tcl_CreateObjCommand(interp,"load_testfixture_extensions",init_all_cmd,0,0);
--- /dev/null
+/*
+** 2010 July 12
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+*/
+
+#include "sqliteInt.h"
+
+/*
+** Page paths:
+**
+** The value of the 'path' column describes the path taken from the
+** root-node of the b-tree structure to each page. The value of the
+** root-node path is '/'.
+**
+** The value of the path for the left-most child page of the root of
+** a b-tree is '/000/'. The next-to-left-most child of the root page is
+** '/001/', and so on, with each sibling page identified by a 3-digit hex
+** value. The children of the 450th left-most sibling have paths such
+** as '/1c2/000/', '/1c2/001/' etc.
+**
+** Overflow pages are specified by appending a '+' character and a
+** six-digit hexadecimal value to the path to the cell they are linked
+** from. For example, the three overflow pages in a chain linked from
+** the left-most cell of the 450th child of the root page are identified
+** by the paths:
+**
+** '/1c2/000+000000' // First page in overflow chain
+** '/1c2/000+000001' // Second page in overflow chain
+** '/1c2/000+000002' // Third page in overflow chain
+**
+** If the paths are sorted using the BINARY collation sequence, then
+** the overflow pages associated with a cell appear earlier in the
+** sort order than the cell's child page. For example:
+**
+** '/1c2/000/' // Left-most child of 450th child of root
+*/
+#define VTAB_SCHEMA \
+ "CREATE TABLE xx( " \
+ " name STRING, /* Name of table or index */" \
+ " path INTEGER, /* Path to page from root */" \
+ " pageno INTEGER, /* Page number */" \
+ " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */" \
+ " ncell INTEGER, /* Cells on page (0 for overflow) */" \
+ " payload INTEGER, /* Bytes of payload on this page */" \
+ " unused INTEGER, /* Bytes of unused space on this page */" \
+ " mx_payload INTEGER /* Largest payload size of all cells */" \
+ ");"
+
+#if 0
+#define VTAB_SCHEMA2 \
+ "CREATE TABLE yy( " \
+ " pageno INTEGER, /* B-tree page number */" \
+ " cellno INTEGER, /* Cell number within page */" \
+ " local INTEGER, /* Bytes of content stored locally */" \
+ " payload INTEGER, /* Total cell payload size */" \
+ " novfl INTEGER /* Number of overflow pages */" \
+ ");"
+#endif
+
+
+typedef struct StatTable StatTable;
+typedef struct StatCursor StatCursor;
+typedef struct StatPage StatPage;
+typedef struct StatCell StatCell;
+
+struct StatCell {
+ int nLocal; /* Bytes of local payload */
+ u32 iChildPg; /* Child node (or 0 if this is a leaf) */
+ int nOvfl; /* Entries in aOvfl[] */
+ u32 *aOvfl; /* Array of overflow page numbers */
+ int nLastOvfl; /* Bytes of payload on final overflow page */
+ int iOvfl; /* Iterates through aOvfl[] */
+};
+
+struct StatPage {
+ u32 iPgno;
+ DbPage *pPg;
+ int iCell;
+
+ char *zPath; /* Path to this page */
+
+ /* Variables populated by statDecodePage(): */
+ u8 flags; /* Copy of flags byte */
+ int nCell; /* Number of cells on page */
+ int nUnused; /* Number of unused bytes on page */
+ StatCell *aCell; /* Array of parsed cells */
+ u32 iRightChildPg; /* Right-child page number (or 0) */
+ int nMxPayload; /* Largest payload of any cell on this page */
+};
+
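+/*
+** Within a StatCursor, aPage[] is used as a stack holding the pages on
+** the path from the root of the b-tree currently being traversed down to
+** the current page, with iPage indexing the top of the stack. Thirty-two
+** levels is more than the maximum b-tree depth that SQLite supports.
+*/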
+struct StatCursor {
+ sqlite3_vtab_cursor base;
+ sqlite3_stmt *pStmt; /* Iterates through set of root pages */
+ int isEof; /* After pStmt has returned SQLITE_DONE */
+
+ StatPage aPage[32];
+ int iPage; /* Current entry in aPage[] */
+
+ /* Values to return. */
+ char *zName; /* Value of 'name' column */
+ char *zPath; /* Value of 'path' column */
+ u32 iPageno; /* Value of 'pageno' column */
+ char *zPagetype; /* Value of 'pagetype' column */
+ int nCell; /* Value of 'ncell' column */
+ int nPayload; /* Value of 'payload' column */
+ int nUnused; /* Value of 'unused' column */
+ int nMxPayload; /* Value of 'mx_payload' column */
+};
+
+struct StatTable {
+ sqlite3_vtab base;
+ sqlite3 *db;
+};
+
+#ifndef get2byte
+# define get2byte(x) ((x)[0]<<8 | (x)[1])
+#endif
+
+/*
+** Connect to or create a dbstat virtual table.
+*/
+static int statConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ StatTable *pTab;
+
+ pTab = (StatTable *)sqlite3_malloc(sizeof(StatTable));
+ if( pTab==0 ) return SQLITE_NOMEM;
+ memset(pTab, 0, sizeof(StatTable));
+ pTab->db = db;
+
+ sqlite3_declare_vtab(db, VTAB_SCHEMA);
+ *ppVtab = &pTab->base;
+ return SQLITE_OK;
+}
+
+/*
+** Disconnect from or destroy a dbstat virtual table.
+*/
+static int statDisconnect(sqlite3_vtab *pVtab){
+ sqlite3_free(pVtab);
+ return SQLITE_OK;
+}
+
+/*
+** There is no "best-index". This virtual table always does a linear
+** scan of the binary VFS log file.
+*/
+static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+
+ /* Records are always returned in ascending order of (name, path).
+ ** If this will satisfy the client, set the orderByConsumed flag so that
+ ** SQLite does not do an external sort.
+ */
+ if( ( pIdxInfo->nOrderBy==1
+ && pIdxInfo->aOrderBy[0].iColumn==0
+ && pIdxInfo->aOrderBy[0].desc==0
+ ) ||
+ ( pIdxInfo->nOrderBy==2
+ && pIdxInfo->aOrderBy[0].iColumn==0
+ && pIdxInfo->aOrderBy[0].desc==0
+ && pIdxInfo->aOrderBy[1].iColumn==1
+ && pIdxInfo->aOrderBy[1].desc==0
+ )
+ ){
+ pIdxInfo->orderByConsumed = 1;
+ }
+
+ pIdxInfo->estimatedCost = 10.0;
+ return SQLITE_OK;
+}
+
+/*
+** Open a new dbstat cursor.
+*/
+static int statOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+ StatTable *pTab = (StatTable *)pVTab;
+ StatCursor *pCsr;
+ int rc;
+
+ pCsr = (StatCursor *)sqlite3_malloc(sizeof(StatCursor));
+ if( pCsr==0 ) return SQLITE_NOMEM;
+ memset(pCsr, 0, sizeof(StatCursor));
+ pCsr->base.pVtab = pVTab;
+
+ rc = sqlite3_prepare_v2(pTab->db,
+ "SELECT 'sqlite_master' AS name, 1 AS rootpage, 'table' AS type"
+ " UNION ALL "
+ "SELECT name, rootpage, type FROM sqlite_master WHERE rootpage!=0"
+ " ORDER BY name", -1,
+ &pCsr->pStmt, 0
+ );
+ if( rc!=SQLITE_OK ){
+ sqlite3_free(pCsr);
+ return rc;
+ }
+
+ *ppCursor = (sqlite3_vtab_cursor *)pCsr;
+ return SQLITE_OK;
+}
+
+static void statClearPage(StatPage *p){
+ int i;
+ for(i=0; i<p->nCell; i++){
+ sqlite3_free(p->aCell[i].aOvfl);
+ }
+ sqlite3PagerUnref(p->pPg);
+ sqlite3_free(p->aCell);
+ sqlite3_free(p->zPath);
+ memset(p, 0, sizeof(StatPage));
+}
+
+static void statResetCsr(StatCursor *pCsr){
+ int i;
+ sqlite3_reset(pCsr->pStmt);
+ for(i=0; i<ArraySize(pCsr->aPage); i++){
+ statClearPage(&pCsr->aPage[i]);
+ }
+ pCsr->iPage = 0;
+ sqlite3_free(pCsr->zPath);
+ pCsr->zPath = 0;
+}
+
+/*
+** Close a dbstat cursor.
+*/
+static int statClose(sqlite3_vtab_cursor *pCursor){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ statResetCsr(pCsr);
+ sqlite3_finalize(pCsr->pStmt);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
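+/*
+** Compute the number of bytes of payload stored locally on a b-tree page
+** for a cell whose total payload is nTotal bytes, mirroring the rules
+** used by the b-tree layer. As a worked example (for illustration only):
+** with a usable page size of 1024 bytes and a table-leaf cell carrying
+** 5000 bytes of payload, nMinLocal==104 and nMaxLocal==989, so
+** nLocal = 104 + (5000-104)%1020 = 920 bytes are stored on the b-tree
+** page and the remainder spills to overflow pages.
+*/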
+static void getLocalPayload(
+ int nUsable, /* Usable bytes per page */
+ u8 flags, /* Page flags */
+ int nTotal, /* Total record (payload) size */
+ int *pnLocal /* OUT: Bytes stored locally */
+){
+ int nLocal;
+ int nMinLocal;
+ int nMaxLocal;
+
+ if( flags==0x0D ){ /* Table leaf node */
+ nMinLocal = (nUsable - 12) * 32 / 255 - 23;
+ nMaxLocal = nUsable - 35;
+ }else{ /* Index interior and leaf nodes */
+ nMinLocal = (nUsable - 12) * 32 / 255 - 23;
+ nMaxLocal = (nUsable - 12) * 64 / 255 - 23;
+ }
+
+ nLocal = nMinLocal + (nTotal - nMinLocal) % (nUsable - 4);
+ if( nLocal>nMaxLocal ) nLocal = nMinLocal;
+ *pnLocal = nLocal;
+}
+
+static int statDecodePage(Btree *pBt, StatPage *p){
+ int nUnused;
+ int iOff;
+ int nHdr;
+ int isLeaf;
+
+ u8 *aData = sqlite3PagerGetData(p->pPg);
+ u8 *aHdr = &aData[p->iPgno==1 ? 100 : 0];
+
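+ /* The b-tree page header consists of a 1-byte page-type flag, the
+ ** 2-byte offset of the first freeblock, the 2-byte cell count, the
+ ** 2-byte offset of the start of the cell content area and a 1-byte
+ ** count of fragmented free bytes. Interior pages carry an additional
+ ** 4-byte right-child page number, and on page 1 the header follows
+ ** the 100-byte database file header. */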
+ p->flags = aHdr[0];
+ p->nCell = get2byte(&aHdr[3]);
+ p->nMxPayload = 0;
+
+ isLeaf = (p->flags==0x0A || p->flags==0x0D);
+ nHdr = 12 - isLeaf*4 + (p->iPgno==1)*100;
+
+ nUnused = get2byte(&aHdr[5]) - nHdr - 2*p->nCell;
+ nUnused += (int)aHdr[7];
+ iOff = get2byte(&aHdr[1]);
+ while( iOff ){
+ nUnused += get2byte(&aData[iOff+2]);
+ iOff = get2byte(&aData[iOff]);
+ }
+ p->nUnused = nUnused;
+ p->iRightChildPg = isLeaf ? 0 : sqlite3Get4byte(&aHdr[8]);
+
+ if( p->nCell ){
+ int i; /* Used to iterate through cells */
+ int nUsable = sqlite3BtreeGetPageSize(pBt) - sqlite3BtreeGetReserve(pBt);
+
+ p->aCell = sqlite3_malloc((p->nCell+1) * sizeof(StatCell));
+ memset(p->aCell, 0, (p->nCell+1) * sizeof(StatCell));
+
+ for(i=0; i<p->nCell; i++){
+ StatCell *pCell = &p->aCell[i];
+
+ iOff = get2byte(&aData[nHdr+i*2]);
+ if( !isLeaf ){
+ pCell->iChildPg = sqlite3Get4byte(&aData[iOff]);
+ iOff += 4;
+ }
+ if( p->flags==0x05 ){
+ /* A table interior node. nPayload==0. */
+ }else{
+ u32 nPayload; /* Bytes of payload total (local+overflow) */
+ int nLocal; /* Bytes of payload stored locally */
+ iOff += getVarint32(&aData[iOff], nPayload);
+ if( p->flags==0x0D ){
+ u64 dummy;
+ iOff += sqlite3GetVarint(&aData[iOff], &dummy);
+ }
+ if( nPayload>p->nMxPayload ) p->nMxPayload = nPayload;
+ getLocalPayload(nUsable, p->flags, nPayload, &nLocal);
+ pCell->nLocal = nLocal;
+ assert( nPayload>=nLocal );
+ assert( nLocal<=(nUsable-35) );
+ if( nPayload>nLocal ){
+ int j;
+ int nOvfl = ((nPayload - nLocal) + nUsable-4 - 1) / (nUsable - 4);
+ pCell->nLastOvfl = (nPayload-nLocal) - (nOvfl-1) * (nUsable-4);
+ pCell->nOvfl = nOvfl;
+ pCell->aOvfl = sqlite3_malloc(sizeof(u32)*nOvfl);
+ pCell->aOvfl[0] = sqlite3Get4byte(&aData[iOff+nLocal]);
+ for(j=1; j<nOvfl; j++){
+ int rc;
+ u32 iPrev = pCell->aOvfl[j-1];
+ DbPage *pPg = 0;
+ rc = sqlite3PagerGet(sqlite3BtreePager(pBt), iPrev, &pPg);
+ if( rc!=SQLITE_OK ){
+ assert( pPg==0 );
+ return rc;
+ }
+ pCell->aOvfl[j] = sqlite3Get4byte(sqlite3PagerGetData(pPg));
+ sqlite3PagerUnref(pPg);
+ }
+ }
+ }
+ }
+ }
+
+ return SQLITE_OK;
+}
+
+static void statSetPath(StatPage *p, StatPage *pParent){
+ if( pParent ){
+ p->zPath = sqlite3_mprintf("%s%.3x/", pParent->zPath, pParent->iCell);
+ }else{
+ /* p is a root page. The path of a root page is always "/". */
+ p->zPath = sqlite3_mprintf("/");
+ }
+}
+
+/*
+** Move a dbstat cursor to the next page in the database file.
+*/
+static int statNext(sqlite3_vtab_cursor *pCursor){
+ int rc;
+ int nPayload;
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ StatTable *pTab = (StatTable *)pCursor->pVtab;
+ Btree *pBt = pTab->db->aDb[0].pBt;
+ Pager *pPager = sqlite3BtreePager(pBt);
+
+ sqlite3_free(pCsr->zPath);
+ pCsr->zPath = 0;
+
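+ /* There are two cases to handle. If no page is currently loaded, the
+ ** next root page is read from pStmt and becomes the bottom of the page
+ ** stack. Otherwise, the page at aPage[iPage] has already been reported
+ ** and the cursor advances to the next unvisited overflow page, child
+ ** page or ancestor. */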
+ if( pCsr->aPage[0].pPg==0 ){
+ rc = sqlite3_step(pCsr->pStmt);
+ if( rc==SQLITE_ROW ){
+ u32 iRoot = sqlite3_column_int64(pCsr->pStmt, 1);
+ rc = sqlite3PagerGet(pPager, iRoot, &pCsr->aPage[0].pPg);
+ pCsr->aPage[0].iPgno = iRoot;
+ pCsr->aPage[0].iCell = 0;
+ pCsr->aPage[0].zPath = sqlite3_mprintf("/");
+ pCsr->iPage = 0;
+ }else{
+ pCsr->isEof = 1;
+ return sqlite3_reset(pCsr->pStmt);
+ }
+ }else{
+
+ /* Page p itself has already been visited. */
+ StatPage *p = &pCsr->aPage[pCsr->iPage];
+
+ while( p->iCell<p->nCell ){
+ StatCell *pCell = &p->aCell[p->iCell];
+ if( pCell->iOvfl<pCell->nOvfl ){
+ int nUsable = sqlite3BtreeGetPageSize(pBt)-sqlite3BtreeGetReserve(pBt);
+ pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+ pCsr->iPageno = pCell->aOvfl[pCell->iOvfl];
+ pCsr->zPagetype = "overflow";
+ pCsr->nCell = 0;
+ pCsr->nMxPayload = 0;
+ pCsr->zPath = sqlite3_mprintf(
+ "%s%.3x+%.6x", p->zPath, p->iCell, pCell->iOvfl
+ );
+ if( pCell->iOvfl<pCell->nOvfl-1 ){
+ pCsr->nUnused = 0;
+ pCsr->nPayload = nUsable - 4;
+ }else{
+ pCsr->nPayload = pCell->nLastOvfl;
+ pCsr->nUnused = nUsable - 4 - pCsr->nPayload;
+ }
+ pCell->iOvfl++;
+ return SQLITE_OK;
+ }
+ if( p->iRightChildPg ) break;
+ p->iCell++;
+ }
+
+ while( !p->iRightChildPg || p->iCell>p->nCell ){
+ statClearPage(p);
+ if( pCsr->iPage==0 ) return statNext(pCursor);
+ pCsr->iPage--;
+ p = &pCsr->aPage[pCsr->iPage];
+ }
+ pCsr->iPage++;
+ assert( p==&pCsr->aPage[pCsr->iPage-1] );
+
+ if( p->iCell==p->nCell ){
+ p[1].iPgno = p->iRightChildPg;
+ }else{
+ p[1].iPgno = p->aCell[p->iCell].iChildPg;
+ }
+ rc = sqlite3PagerGet(pPager, p[1].iPgno, &p[1].pPg);
+ p[1].iCell = 0;
+ p[1].zPath = sqlite3_mprintf("%s%.3x/", p->zPath, p->iCell);
+ p->iCell++;
+ }
+
+
+ /* Populate the StatCursor fields with the values to be returned
+ ** by the xColumn() and xRowid() methods.
+ */
+ if( rc==SQLITE_OK ){
+ int i;
+ StatPage *p = &pCsr->aPage[pCsr->iPage];
+ pCsr->zName = (char *)sqlite3_column_text(pCsr->pStmt, 0);
+ pCsr->iPageno = p->iPgno;
+
+ rc = statDecodePage(pBt, p);
+
+ switch( p->flags ){
+ case 0x05: /* table internal */
+ case 0x02: /* index internal */
+ pCsr->zPagetype = "internal";
+ break;
+ case 0x0D: /* table leaf */
+ case 0x0A: /* index leaf */
+ pCsr->zPagetype = "leaf";
+ break;
+ default:
+ pCsr->zPagetype = "corrupted";
+ break;
+ }
+ pCsr->nCell = p->nCell;
+ pCsr->nUnused = p->nUnused;
+ pCsr->nMxPayload = p->nMxPayload;
+ pCsr->zPath = sqlite3_mprintf("%s", p->zPath);
+ nPayload = 0;
+ for(i=0; i<p->nCell; i++){
+ nPayload += p->aCell[i].nLocal;
+ }
+ pCsr->nPayload = nPayload;
+ }
+
+ return rc;
+}
+
+static int statEof(sqlite3_vtab_cursor *pCursor){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ return pCsr->isEof;
+}
+
+static int statFilter(
+ sqlite3_vtab_cursor *pCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ sqlite3 *db = ((StatTable *)(pCursor->pVtab))->db;
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ int nPage = 0;
+
+ statResetCsr((StatCursor *)pCursor);
+ sqlite3PagerPagecount(sqlite3BtreePager(db->aDb[0].pBt), &nPage);
+ if( nPage==0 ){
+ pCsr->isEof = 1;
+ return SQLITE_OK;
+ }
+
+ return statNext(pCursor);
+}
+
+static int statColumn(
+ sqlite3_vtab_cursor *pCursor,
+ sqlite3_context *ctx,
+ int i
+){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ switch( i ){
+ case 0: /* name */
+ sqlite3_result_text(ctx, pCsr->zName, -1, SQLITE_STATIC);
+ break;
+ case 1: /* path */
+ sqlite3_result_text(ctx, pCsr->zPath, -1, SQLITE_TRANSIENT);
+ break;
+ case 2: /* pageno */
+ sqlite3_result_int64(ctx, pCsr->iPageno);
+ break;
+ case 3: /* pagetype */
+ sqlite3_result_text(ctx, pCsr->zPagetype, -1, SQLITE_STATIC);
+ break;
+ case 4: /* ncell */
+ sqlite3_result_int(ctx, pCsr->nCell);
+ break;
+ case 5: /* payload */
+ sqlite3_result_int(ctx, pCsr->nPayload);
+ break;
+ case 6: /* unused */
+ sqlite3_result_int(ctx, pCsr->nUnused);
+ break;
+ case 7: /* mx_payload */
+ sqlite3_result_int(ctx, pCsr->nMxPayload);
+ break;
+ }
+ return SQLITE_OK;
+}
+
+static int statRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
+ StatCursor *pCsr = (StatCursor *)pCursor;
+ *pRowid = pCsr->iPageno;
+ return SQLITE_OK;
+}
+
+int sqlite3_dbstat_register(sqlite3 *db){
+ static sqlite3_module dbstat_module = {
+ 0, /* iVersion */
+ statConnect, /* xCreate */
+ statConnect, /* xConnect */
+ statBestIndex, /* xBestIndex */
+ statDisconnect, /* xDisconnect */
+ statDisconnect, /* xDestroy */
+ statOpen, /* xOpen - open a cursor */
+ statClose, /* xClose - close a cursor */
+ statFilter, /* xFilter - configure scan constraints */
+ statNext, /* xNext - advance a cursor */
+ statEof, /* xEof - check for end of scan */
+ statColumn, /* xColumn - read data */
+ statRowid, /* xRowid - read data */
+ 0, /* xUpdate */
+ 0, /* xBegin */
+ 0, /* xSync */
+ 0, /* xCommit */
+ 0, /* xRollback */
+ 0, /* xFindMethod */
+ 0, /* xRename */
+ };
+ sqlite3_create_module(db, "dbstat", &dbstat_module, 0);
+ return SQLITE_OK;
+}
+
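+/*
+** For example (illustration only), an application might make the module
+** available and create an instance of it with:
+**
+** sqlite3_dbstat_register(db);
+** sqlite3_exec(db, "CREATE VIRTUAL TABLE temp.stat USING dbstat", 0, 0, 0);
+*/
+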
+#ifdef SQLITE_TEST
+#include <tcl.h>
+
+static int test_dbstat(
+ void *clientData,
+ Tcl_Interp *interp,
+ int objc,
+ Tcl_Obj *CONST objv[]
+){
+#ifdef SQLITE_OMIT_VIRTUALTABLE
+ Tcl_AppendResult(interp, "dbstat not available because of "
+ "SQLITE_OMIT_VIRTUALTABLE", (void*)0);
+ return TCL_ERROR;
+#else
+ struct SqliteDb { sqlite3 *db; };
+ char *zDb;
+ Tcl_CmdInfo cmdInfo;
+
+ if( objc!=2 ){
+ Tcl_WrongNumArgs(interp, 1, objv, "DB");
+ return TCL_ERROR;
+ }
+
+ zDb = Tcl_GetString(objv[1]);
+ if( Tcl_GetCommandInfo(interp, zDb, &cmdInfo) ){
+ sqlite3* db = ((struct SqliteDb*)cmdInfo.objClientData)->db;
+ sqlite3_dbstat_register(db);
+ }
+ return TCL_OK;
+#endif
+}
+
+int SqlitetestStat_Init(Tcl_Interp *interp){
+ Tcl_CreateObjCommand(interp, "register_dbstat_vtab", test_dbstat, 0, 0);
+ return TCL_OK;
+}
+#endif
+
exit 1
}
-# Maximum distance between pages before we consider it a "gap"
-#
-set MAXGAP 3
-
# Open the database
#
sqlite3 db [lindex $argv 0]
-set DB [btree_open [lindex $argv 0] 1000 0]
+register_dbstat_vtab db
+
+set pageSize [db one {PRAGMA page_size}]
+
+#set DB [btree_open [lindex $argv 0] 1000 0]
# In-memory database for collecting statistics. This script loops through
# the tables and indices in the database being analyzed, adding a row for each
);}
mem eval $tabledef
+# Create a temporary "dbstat" virtual table.
+#
+db eval {
+ CREATE VIRTUAL TABLE temp.stat USING dbstat;
+ CREATE TEMP TABLE dbstat AS SELECT * FROM temp.stat ORDER BY name, path;
+ DROP TABLE temp.stat;
+}
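+
+# The rows are materialized in (name, path) order so that, for each table
+# or index, rowid order in the temp table corresponds to a top-down
+# traversal of its b-tree with overflow chains visited before child pages.
+# The gap_cnt calculation below relies on this ordering.
+#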
+
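+# Helpers used to classify dbstat rows. Note that interior pages of an
+# index b-tree are counted as "leaf" pages here because, unlike table
+# interior pages, they carry real entries and payload.
+#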
+proc isleaf {pagetype is_index} {
+ return [expr {$pagetype == "leaf" || ($pagetype == "internal" && $is_index)}]
+}
+proc isoverflow {pagetype is_index} {
+ return [expr {$pagetype == "overflow"}]
+}
+proc isinternal {pagetype is_index} {
+ return [expr {$pagetype == "internal" && $is_index==0}]
+}
+
+db func isleaf isleaf
+db func isinternal isinternal
+db func isoverflow isoverflow
+
+set sql { SELECT name, tbl_name FROM sqlite_master WHERE rootpage>0 }
+foreach {name tblname} [concat sqlite_master sqlite_master [db eval $sql]] {
+
+ set is_index [expr {$name!=$tblname}]
+ db eval {
+ SELECT
+ sum(ncell) AS nentry,
+ sum(isleaf(pagetype, $is_index) * ncell) AS leaf_entries,
+ sum(payload) AS payload,
+ sum(isoverflow(pagetype, $is_index) * payload) AS ovfl_payload,
+ sum(path LIKE '%+000000') AS ovfl_cnt,
+ max(mx_payload) AS mx_payload,
+ sum(isinternal(pagetype, $is_index)) AS int_pages,
+ sum(isleaf(pagetype, $is_index)) AS leaf_pages,
+ sum(isoverflow(pagetype, $is_index)) AS ovfl_pages,
+ sum(isinternal(pagetype, $is_index) * unused) AS int_unused,
+ sum(isleaf(pagetype, $is_index) * unused) AS leaf_unused,
+ sum(isoverflow(pagetype, $is_index) * unused) AS ovfl_unused
+ FROM temp.dbstat WHERE name = $name
+ } break
+
+ # Column 'gap_cnt' is set to the number of non-contiguous entries in the
+ # list of pages visited if the b-tree structure is traversed in a top-down
+ # fashion (each node is visited before its child-tree). Any overflow
+ # chains present are traversed from start to finish before the
+ # corresponding child-tree is visited.
+ #
+ set gap_cnt 0
+ set pglist [db eval {
+ SELECT pageno FROM temp.dbstat WHERE name = $name ORDER BY rowid
+ }]
+ set prev [lindex $pglist 0]
+ foreach pgno [lrange $pglist 1 end] {
+ if {$pgno != $prev+1} {incr gap_cnt}
+ set prev $pgno
+ }
+
+ mem eval {
+ INSERT INTO space_used VALUES(
+ $name,
+ $tblname,
+ $is_index,
+ $nentry,
+ $leaf_entries,
+ $payload,
+ $ovfl_payload,
+ $ovfl_cnt,
+ $mx_payload,
+ $int_pages,
+ $leaf_pages,
+ $ovfl_pages,
+ $int_unused,
+ $leaf_unused,
+ $ovfl_unused,
+ $gap_cnt
+ );
+ }
+}
+
proc integerify {real} {
if {[string is double -strict $real]} {
return [expr {int($real)}]
return '$q'
}
-# This proc is a wrapper around the btree_cursor_info command. The
-# second argument is an open btree cursor returned by [btree_cursor].
-# The first argument is the name of an array variable that exists in
-# the scope of the caller. If the third argument is non-zero, then
-# info is returned for the page that lies $up entries upwards in the
-# tree-structure. (i.e. $up==1 returns the parent page, $up==2 the
-# grandparent etc.)
-#
-# The following entries in that array are filled in with information retrieved
-# using [btree_cursor_info]:
-#
-# $arrayvar(page_no) = The page number
-# $arrayvar(entry_no) = The entry number
-# $arrayvar(page_entries) = Total number of entries on this page
-# $arrayvar(cell_size) = Cell size (local payload + header)
-# $arrayvar(page_freebytes) = Number of free bytes on this page
-# $arrayvar(page_freeblocks) = Number of free blocks on the page
-# $arrayvar(payload_bytes) = Total payload size (local + overflow)
-# $arrayvar(header_bytes) = Header size in bytes
-# $arrayvar(local_payload_bytes) = Local payload size
-# $arrayvar(parent) = Parent page number
-#
-proc cursor_info {arrayvar csr {up 0}} {
- upvar $arrayvar a
- foreach [list a(page_no) \
- a(entry_no) \
- a(page_entries) \
- a(cell_size) \
- a(page_freebytes) \
- a(page_freeblocks) \
- a(payload_bytes) \
- a(header_bytes) \
- a(local_payload_bytes) \
- a(parent) \
- a(first_ovfl) ] [btree_cursor_info $csr $up] break
-}
-
-# Determine the page-size of the database. This global variable is used
-# throughout the script.
-#
-set pageSize [db eval {PRAGMA page_size}]
-
-# Analyze every table in the database, one at a time.
-#
-# The following query returns the name and root-page of each table in the
-# database, including the sqlite_master table.
-#
-set sql {
- SELECT name, rootpage FROM sqlite_master
- WHERE type='table' AND rootpage>0
- UNION ALL
- SELECT 'sqlite_master', 1
- ORDER BY 1
-}
-set wideZero [expr {10000000000 - 10000000000}]
-foreach {name rootpage} [db eval $sql] {
- puts stderr "Analyzing table $name..."
-
- # Code below traverses the table being analyzed (table name $name), using the
- # btree cursor $cursor. Statistics related to table $name are accumulated in
- # the following variables:
- #
- set total_payload $wideZero ;# Payload space used by all entries
- set total_ovfl $wideZero ;# Payload space on overflow pages
- set unused_int $wideZero ;# Unused space on interior nodes
- set unused_leaf $wideZero ;# Unused space on leaf nodes
- set unused_ovfl $wideZero ;# Unused space on overflow pages
- set cnt_ovfl $wideZero ;# Number of entries that use overflows
- set cnt_leaf_entry $wideZero ;# Number of leaf entries
- set cnt_int_entry $wideZero ;# Number of interor entries
- set mx_payload $wideZero ;# Maximum payload size
- set ovfl_pages $wideZero ;# Number of overflow pages used
- set leaf_pages $wideZero ;# Number of leaf pages
- set int_pages $wideZero ;# Number of interior pages
- set gap_cnt 0 ;# Number of holes in the page sequence
- set prev_pgno 0 ;# Last page number seen
-
- # As the btree is traversed, the array variable $seen($pgno) is set to 1
- # the first time page $pgno is encountered.
- #
- catch {unset seen}
-
- # The following loop runs once for each entry in table $name. The table
- # is traversed using the btree cursor stored in variable $csr
- #
- set csr [btree_cursor $DB $rootpage 0]
- for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
- incr cnt_leaf_entry
-
- # Retrieve information about the entry the btree-cursor points to into
- # the array variable $ci (cursor info).
- #
- cursor_info ci $csr
-
- # Check if the payload of this entry is greater than the current
- # $mx_payload statistic for the table. Also increase the $total_payload
- # statistic.
- #
- if {$ci(payload_bytes)>$mx_payload} {set mx_payload $ci(payload_bytes)}
- incr total_payload $ci(payload_bytes)
-
- # If this entry uses overflow pages, then update the $cnt_ovfl,
- # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
- #
- set ovfl [expr {$ci(payload_bytes)-$ci(local_payload_bytes)}]
- if {$ovfl} {
- incr cnt_ovfl
- incr total_ovfl $ovfl
- set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
- incr ovfl_pages $n
- incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
- set pglist [btree_ovfl_info $DB $csr]
- } else {
- set pglist {}
- }
-
- # If this is the first table entry analyzed for the page, then update
- # the page-related statistics $leaf_pages and $unused_leaf. Also, if
- # this page has a parent page that has not been analyzed, retrieve
- # info for the parent and update statistics for it too.
- #
- if {![info exists seen($ci(page_no))]} {
- set seen($ci(page_no)) 1
- incr leaf_pages
- incr unused_leaf $ci(page_freebytes)
- set pglist "$ci(page_no) $pglist"
-
- # Now check if the page has a parent that has not been analyzed. If
- # so, update the $int_pages, $cnt_int_entry and $unused_int statistics
- # accordingly. Then check if the parent page has a parent that has
- # not yet been analyzed etc.
- #
- # set parent $ci(parent_page_no)
- for {set up 1} \
- {$ci(parent)!=0 && ![info exists seen($ci(parent))]} {incr up} \
- {
- # Mark the parent as seen.
- #
- set seen($ci(parent)) 1
-
- # Retrieve info for the parent and update statistics.
- cursor_info ci $csr $up
- incr int_pages
- incr cnt_int_entry $ci(page_entries)
- incr unused_int $ci(page_freebytes)
-
- # parent pages come before their first child
- set pglist "$ci(page_no) $pglist"
- }
- }
-
- # Check the page list for fragmentation
- #
- foreach pg $pglist {
- if {$pg!=$prev_pgno+1 && $prev_pgno>0} {
- incr gap_cnt
- }
- set prev_pgno $pg
- }
- }
- btree_close_cursor $csr
-
- # Handle the special case where a table contains no data. In this case
- # all statistics are zero, except for the number of leaf pages (1) and
- # the unused bytes on leaf pages ($pageSize - 8).
- #
- # An exception to the above is the sqlite_master table. If it is empty
- # then all statistics are zero except for the number of leaf pages (1),
- # and the number of unused bytes on leaf pages ($pageSize - 112).
- #
- if {[llength [array names seen]]==0} {
- set leaf_pages 1
- if {$rootpage==1} {
- set unused_leaf [expr {$pageSize-112}]
- } else {
- set unused_leaf [expr {$pageSize-8}]
- }
- }
-
- # Insert the statistics for the table analyzed into the in-memory database.
- #
- set sql "INSERT INTO space_used VALUES("
- append sql [quote $name]
- append sql ",[quote $name]"
- append sql ",0"
- append sql ",[expr {$cnt_leaf_entry+$cnt_int_entry}]"
- append sql ",$cnt_leaf_entry"
- append sql ",$total_payload"
- append sql ",$total_ovfl"
- append sql ",$cnt_ovfl"
- append sql ",$mx_payload"
- append sql ",$int_pages"
- append sql ",$leaf_pages"
- append sql ",$ovfl_pages"
- append sql ",$unused_int"
- append sql ",$unused_leaf"
- append sql ",$unused_ovfl"
- append sql ",$gap_cnt"
- append sql );
- mem eval $sql
-}
-
-# Analyze every index in the database, one at a time.
-#
-# The query below returns the name, associated table and root-page number
-# for every index in the database.
-#
-set sql {
- SELECT name, tbl_name, rootpage FROM sqlite_master WHERE type='index'
- ORDER BY 2, 1
-}
-foreach {name tbl_name rootpage} [db eval $sql] {
- puts stderr "Analyzing index $name of table $tbl_name..."
-
- # Code below traverses the index being analyzed (index name $name), using the
- # btree cursor $cursor. Statistics related to index $name are accumulated in
- # the following variables:
- #
- set total_payload $wideZero ;# Payload space used by all entries
- set total_ovfl $wideZero ;# Payload space on overflow pages
- set unused_leaf $wideZero ;# Unused space on leaf nodes
- set unused_ovfl $wideZero ;# Unused space on overflow pages
- set cnt_ovfl $wideZero ;# Number of entries that use overflows
- set cnt_leaf_entry $wideZero ;# Number of leaf entries
- set mx_payload $wideZero ;# Maximum payload size
- set ovfl_pages $wideZero ;# Number of overflow pages used
- set leaf_pages $wideZero ;# Number of leaf pages
- set gap_cnt 0 ;# Number of holes in the page sequence
- set prev_pgno 0 ;# Last page number seen
-
- # As the btree is traversed, the array variable $seen($pgno) is set to 1
- # the first time page $pgno is encountered.
- #
- catch {unset seen}
-
- # The following loop runs once for each entry in index $name. The index
- # is traversed using the btree cursor stored in variable $csr
- #
- set csr [btree_cursor $DB $rootpage 0]
- for {btree_first $csr} {![btree_eof $csr]} {btree_next $csr} {
- incr cnt_leaf_entry
-
- # Retrieve information about the entry the btree-cursor points to into
- # the array variable $ci (cursor info).
- #
- cursor_info ci $csr
-
- # Check if the payload of this entry is greater than the current
- # $mx_payload statistic for the table. Also increase the $total_payload
- # statistic.
- #
- set payload [btree_keysize $csr]
- if {$payload>$mx_payload} {set mx_payload $payload}
- incr total_payload $payload
-
- # If this entry uses overflow pages, then update the $cnt_ovfl,
- # $total_ovfl, $ovfl_pages and $unused_ovfl statistics.
- #
- set ovfl [expr {$payload-$ci(local_payload_bytes)}]
- if {$ovfl} {
- incr cnt_ovfl
- incr total_ovfl $ovfl
- set n [expr {int(ceil($ovfl/($pageSize-4.0)))}]
- incr ovfl_pages $n
- incr unused_ovfl [expr {$n*($pageSize-4) - $ovfl}]
- }
-
- # If this is the first table entry analyzed for the page, then update
- # the page-related statistics $leaf_pages and $unused_leaf.
- #
- if {![info exists seen($ci(page_no))]} {
- set seen($ci(page_no)) 1
- incr leaf_pages
- incr unused_leaf $ci(page_freebytes)
- set pg $ci(page_no)
- if {$prev_pgno>0 && $pg!=$prev_pgno+1} {
- incr gap_cnt
- }
- set prev_pgno $ci(page_no)
- }
- }
- btree_close_cursor $csr
-
- # Handle the special case where a index contains no data. In this case
- # all statistics are zero, except for the number of leaf pages (1) and
- # the unused bytes on leaf pages ($pageSize - 8).
- #
- if {[llength [array names seen]]==0} {
- set leaf_pages 1
- set unused_leaf [expr {$pageSize-8}]
- }
-
- # Insert the statistics for the index analyzed into the in-memory database.
- #
- set sql "INSERT INTO space_used VALUES("
- append sql [quote $name]
- append sql ",[quote $tbl_name]"
- append sql ",1"
- append sql ",$cnt_leaf_entry"
- append sql ",$cnt_leaf_entry"
- append sql ",$total_payload"
- append sql ",$total_ovfl"
- append sql ",$cnt_ovfl"
- append sql ",$mx_payload"
- append sql ",0"
- append sql ",$leaf_pages"
- append sql ",$ovfl_pages"
- append sql ",0"
- append sql ",$unused_leaf"
- append sql ",$unused_ovfl"
- append sql ",$gap_cnt"
- append sql );
- mem eval $sql
-}
-
# Generate a single line of output in the statistics section of the
# report.
#
# pages and the page size used by the database (in bytes).
proc autovacuum_overhead {filePages pageSize} {
- # Read the value of meta 4. If non-zero, then the database supports
- # auto-vacuum. It would be possible to use "PRAGMA auto_vacuum" instead,
- # but that would not work if the SQLITE_OMIT_PRAGMA macro was defined
- # when the library was built.
- set meta4 [lindex [btree_get_meta $::DB] 4]
+ # Set $autovacuum to non-zero for databases that support auto-vacuum.
+ set autovacuum [db one {PRAGMA auto_vacuum}]
# If the database is not an auto-vacuum database or the file consists
# of one page only then there is no overhead for auto-vacuum. Return zero.
- if {0==$meta4 || $filePages==1} {
+ if {0==$autovacuum || $filePages==1} {
return 0
}
set free_pgcnt [expr $file_pgcnt-$inuse_pgcnt-$av_pgcnt]
set free_percent [percent $free_pgcnt $file_pgcnt]
-set free_pgcnt2 [lindex [btree_get_meta $DB] 0]
+set free_pgcnt2 [db one {PRAGMA freelist_count}]
set free_percent2 [percent $free_pgcnt2 $file_pgcnt]
set file_pgcnt2 [expr {$inuse_pgcnt+$free_pgcnt2+$av_pgcnt}]