------BEGIN PGP SIGNED MESSAGE-----
-Hash: SHA1
-
-C Extensive\sedits\sto\sthe\scomments\sin\sthe\ssqlite.h.in\ssource\sfile\sto\sidentify\ntestable\sstatements\sof\struth\sabout\sthe\sC-language\sinterface.
-D 2009-12-11T03:44:19
+C Add\scomment\sto\sfts3rnd.test\sto\sexplain\show\sthe\stest\sworks.
+D 2009-12-11T07:07:36
F Makefile.arm-wince-mingw32ce-gcc fcd5e9cd67fe88836360bb4f9ef4cb7f8e2fb5a0
F Makefile.in c5827ead754ab32b9585487177c93bb00b9497b3
F Makefile.linux-gcc d53183f4aa6a9192d249731c90dbdffbd2c68654
F test/fts3expr2.test 18da930352e5693eaa163a3eacf96233b7290d1a
F test/fts3malloc.test d02ee86b21edd2b43044e0d6dfdcd26cb6efddcb
F test/fts3near.test dc196dd17b4606f440c580d45b3d23aa975fd077
-F test/fts3rnd.test d7fe25493aa76f5010df0a6dbfa4dfa14f537c26
+F test/fts3rnd.test b1fd9a0b8bd95014b1c2cb9d5a8f27b5b3afc50d
F test/func.test af106ed834001738246d276659406823e35cde7b
F test/func2.test 772d66227e4e6684b86053302e2d74a2500e1e0f
F test/fuzz.test a4174c3009a3e2c2e14b31b364ebf7ddb49de2c9
F tool/speedtest8.c 2902c46588c40b55661e471d7a86e4dd71a18224
F tool/speedtest8inst1.c 293327bc76823f473684d589a8160bde1f52c14e
F tool/vdbe-compress.tcl d70ea6d8a19e3571d7ab8c9b75cba86d1173ff0f
-P c2d22960f652264c48ec41b9ca4047aa142d86f7
-R 486c06b197bfc51eb94a7f962ed6737c
-U drh
-Z ea7c058e969ebde5efdd00ab50b34456
------BEGIN PGP SIGNATURE-----
-Version: GnuPG v1.4.6 (GNU/Linux)
-
-iD8DBQFLIcAWoxKgR168RlERAnIhAJ0RwDqvVTXI0zkKGEA3VqBT511IwACgitGK
-F8Of11tttSf9woDPKl/nH5c=
-=iUzM
------END PGP SIGNATURE-----
+P ea884e1ed8dba1aa0f3cf68fc71923954983f6c6
+R ae6e3fceaf15353a41abf5fd7eaec6f2
+U dan
+Z ad28381e408238f8547c0374a8ed6615
# Brute force (random data) tests for FTS3.
#
+#-------------------------------------------------------------------------
+#
+# The FTS3 tests implemented in this file focus on testing that FTS3
+# returns the correct set of documents for various types of full-text
+# query. This is done using pseudo-randomly generated data and queries.
+# The expected result of each query is calculated using Tcl code.
+#
+# 1. The database is initialized to contain a single table with three
+# columns. 100 rows are inserted into the table. Each of the three
+# values in each row is a document consisting of between 0 and 100
+# terms. Terms are selected from a vocabulary of $G(nVocab) terms.
+#
+# 2. The following is performed 100 times:
+#
+# a. A row is inserted into the database. The row contents are
+# generated as in step 1. The docid is a pseudo-randomly selected
+# value between 0 and 1000000.
+#
+#    b. A pseudo-randomly selected row is updated. One of its columns is
+# set to contain a new document generated in the same way as the
+# documents in step 1.
+#
+#    c. A pseudo-randomly selected row is deleted.
+#
+# d. For each of several types of fts3 queries, 10 SELECT queries
+# of the form:
+#
+# SELECT docid FROM <tbl> WHERE <tbl> MATCH '<query>'
+#
+#       are evaluated. The results are compared to those calculated by
+#       Tcl code in this file (see the sketch that follows this comment
+#       block). The patterns used for the different query types are:
+#
+# 1. query = <term>
+# 2. query = <prefix>
+# 3. query = "<term> <term>"
+# 4. query = "<term> <term> <term>"
+# 5. query = "<prefix> <prefix> <prefix>"
+# 6. query = <term> NEAR <term>
+# 7. query = <term> NEAR/11 <term> NEAR/11 <term>
+# 8. query = <term> OR <term>
+# 9. query = <term> NOT <term>
+# 10. query = <term> AND <term>
+# 11. query = <term> NEAR <term> OR <term> NEAR <term>
+# 12. query = <term> NEAR <term> NOT <term> NEAR <term>
+# 13. query = <term> NEAR <term> AND <term> NEAR <term>
+#
+#      where <term> is a term pseudo-randomly selected from the vocabulary
+#      and <prefix> is the first 2 characters of such a term followed by
+#      a "*" character.
+#
+# Every second iteration, steps (a) through (d) above are performed
+# within a single transaction. This forces the queries in (d) to
+# read data from both the database and the in-memory hash table
+# that caches the full-text index entries created by steps (a), (b)
+# and (c) until the transaction is committed.
+#
+# The procedure above is run 5 times, using advisory fts3 node sizes of 50,
+# 500, 1000 and 2000 bytes.
+#
+# After the test using an advisory node-size of 50, an OOM test is run using
+# the database. This test is similar to step (d) above, except that it tests
+# the effects of transient and persistent OOM conditions encountered while
+# executing each query.
+#
+
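# NOTE: The following is an editorial sketch and is not part of the original
# test file. It illustrates, for query type 1 (<term>) only, how the expected
# result of a MATCH query might be computed in Tcl and compared with the
# result returned by FTS3, as described in step (d) of the comment above.
# The table name "t1" and the array ::doccontent() -- assumed here to map
# each docid to a list of the row's three documents, each document itself a
# list of terms -- are illustrative names, not those used elsewhere in this
# file.
#
proc simple_term_matches {term} {
  set ret [list]
  foreach docid [lsort -integer [array names ::doccontent]] {
    # A row matches if any one of its three document columns contains $term.
    foreach doc $::doccontent($docid) {
      if {[lsearch -exact $doc $term] >= 0} {
        lappend ret $docid
        break
      }
    }
  }
  return $ret
}
#
# A check of this kind might then be run as follows. Every second iteration
# the surrounding inserts, updates, deletes and checks are wrapped in an
# explicit transaction, which forces the query to merge data from the
# database with the in-memory pending-term hash table:
#
#   execsql BEGIN
#   # ... perform steps (a) through (c) ...
#   do_test fts3rnd-sketch {
#     execsql { SELECT docid FROM t1 WHERE t1 MATCH $term }
#   } [simple_term_matches $term]
#   execsql COMMIT
#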
set testdir [file dirname $argv0]
source $testdir/tester.tcl
ifcapable !fts3 { finish_test ; return }
source $testdir/fts3_common.tcl
+set G(nVocab) 100
+
set nVocab 100
set lVocab [list]
expr srand(0)
-
# Generate a vocabulary of nVocab words. Each word is 3 characters long.
#
set lChar {a b c d e f g h i j k l m n o p q r s t u v w x y z}
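
# The following is an editorial sketch, not part of the original test: it
# shows one way a 3-character vocabulary word, and a random document of the
# kind described in step 1 of the comment above, could be generated from
# $lChar and $lVocab. The proc names are illustrative only and are not the
# ones used by the rest of this file.
#
proc sketch_random_word {} {
  # Build a 3-character word from the character set in $::lChar.
  set w ""
  for {set i 0} {$i < 3} {incr i} {
    append w [lindex $::lChar [expr {int(rand()*[llength $::lChar])}]]
  }
  return $w
}
proc sketch_random_doc {} {
  # A document is a list of between 0 and 100 terms drawn from the
  # vocabulary in $::lVocab.
  set nTerm [expr {int(rand()*101)}]
  set doc [list]
  for {set i 0} {$i < $nTerm} {incr i} {
    lappend doc [lindex $::lVocab [expr {int(rand()*[llength $::lVocab])}]]
  }
  return $doc
}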