"metrics for",self.location.href,":",metrics,
"\nTotal of",n,"op(s) for",t,
"ms (incl. "+w+" ms of waiting on the async side)");
+ console.log("Serialization metrics:",JSON.stringify(metrics.s11n,0,2));
},
reset: function(){
let k;
for(k in state.opIds){
r(metrics[k] = Object.create(null));
}
+ let s = metrics.s11n = Object.create(null);
+ s = s.serialize = Object.create(null);
+ s.count = s.time = 0;
+ s = metrics.s11n.deserialize = Object.create(null);
+ s.count = s.time = 0;
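+ /* After reset(), metrics.s11n has the shape
+    {serialize: {count, time}, deserialize: {count, time}},
+    with times accumulated in milliseconds via performance.now(). */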
//[ // timed routines which are not in state.opIds
// 'xFileControl'
//].forEach((k)=>r(metrics[k] = Object.create(null)));
};
const initS11n = ()=>{
- // Achtung: this code is 100% duplicated in the other half of this proxy!
+ /**
+ ACHTUNG: this code is 100% duplicated in the other half of
+ this proxy!
+
+ Historical note: this impl was initially about 5% of this size by
+ using JSON.stringify/parse(), but fit-to-purpose serialization
+ saves considerable runtime.
+ */
if(state.s11n) return state.s11n;
- const jsonDecoder = new TextDecoder(),
- jsonEncoder = new TextEncoder('utf-8'),
- viewSz = new DataView(state.sabIO, state.sabS11nOffset, 4),
- viewJson = new Uint8Array(state.sabIO, state.sabS11nOffset+4, state.sabS11nSize-4);
+ const textDecoder = new TextDecoder(),
+ textEncoder = new TextEncoder('utf-8'),
+ viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
+ viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
state.s11n = Object.create(null);
+ const TypeIds = Object.create(null);
+ TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
+ TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
+ TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
+ TypeIds.string = { id: 4 };
+ const getTypeId = (v)=>{
+ return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
+ };
+ const getTypeIdById = (tid)=>{
+ switch(tid){
+ case TypeIds.number.id: return TypeIds.number;
+ case TypeIds.bigint.id: return TypeIds.bigint;
+ case TypeIds.boolean.id: return TypeIds.boolean;
+ case TypeIds.string.id: return TypeIds.string;
+ default: toss("Invalid type ID:",tid);
+ }
+ };
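+ /**
+    Buffer layout, as implemented by serialize()/deserialize() below:
+    byte 0 holds the argument count, bytes 1..argc hold one type ID
+    per argument, then the values follow in order: 8 bytes each for
+    number and bigint, 4 bytes for boolean, and for string a 4-byte
+    byte length followed by that many UTF-8 bytes.
+ */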
/**
Returns an array of the state serialized by the most recent
serialize() operation (here or in the counterpart thread), or
null if the serialization buffer is empty.
*/
state.s11n.deserialize = function(){
- const sz = viewSz.getInt32(0, state.littleEndian);
- const json = sz ? jsonDecoder.decode(
- viewJson.slice(0, sz)
- /* slice() (copy) needed, instead of subarray() (reference),
- because TextDecoder throws if asked to decode from an
- SAB. */
- ) : null;
- return JSON.parse(json);
- }
+ ++metrics.s11n.deserialize.count;
+ const t = performance.now();
+ let rc = null;
+ const argc = viewU8[0];
+ if(argc){
+ rc = [];
+ let offset = 1, i, n, v, typeIds = [];
+ for(i = 0; i < argc; ++i, ++offset){
+ typeIds.push(getTypeIdById(viewU8[offset]));
+ }
+ for(i = 0; i < argc; ++i){
+ const t = typeIds[i];
+ if(t.getter){
+ v = viewDV[t.getter](offset, state.littleEndian);
+ offset += t.size;
+ }else{
+ n = viewDV.getInt32(offset, state.littleEndian);
+ offset += 4;
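+ /* A copying slice(), rather than a subarray() view, is required
+    here because TextDecoder refuses to decode a view of a
+    SharedArrayBuffer. */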
+ v = textDecoder.decode(viewU8.slice(offset, offset+n));
+ offset += n;
+ }
+ rc.push(v);
+ }
+ }
+ //log("deserialize:",argc, rc);
+ metrics.s11n.deserialize.time += performance.now() - t;
+ return rc;
+ };
/**
Serializes all arguments to the shared buffer for consumption
- by the counterpart thread. This impl currently uses JSON for
- serialization for simplicy of implementation, but if that
- proves imperformant then a lower-level approach will be
- created.
-
- If passed "too much data" (more that the shared buffer size
- it will either throw or truncate the data (not certain
- which)). This routine is only intended for serializing OPFS
- VFS arguments and (in at least one special case) result
- values, and the buffer is sized to be able to comfortably
- handle those.
+ by the counterpart thread.
+
+ This routine is only intended for serializing OPFS VFS
+ arguments and (in at least one special case) result values,
+ and the buffer is sized to be able to comfortably handle
+ those.
If passed no arguments then it zeroes out the serialization
state.
*/
state.s11n.serialize = function(...args){
+ ++metrics.s11n.serialize.count;
+ const t = performance.now();
if(args.length){
- const json = jsonEncoder.encode(JSON.stringify(args));
- viewSz.setInt32(0, json.byteLength, state.littleEndian);
- viewJson.set(json);
+ //log("serialize():",args);
+ let i = 0, offset = 1, typeIds = [];
+ viewU8[0] = args.length & 0xff;
+ for(; i < args.length; ++i, ++offset){
+ typeIds.push(getTypeId(args[i]));
+ viewU8[offset] = typeIds[i].id;
+ }
+ for(i = 0; i < args.length; ++i) {
+ const t = typeIds[i];
+ if(t.setter){
+ viewDV[t.setter](offset, args[i], state.littleEndian);
+ offset += t.size;
+ }else{
+ const s = textEncoder.encode(args[i]);
+ viewDV.setInt32(offset, s.byteLength, state.littleEndian);
+ offset += 4;
+ viewU8.set(s, offset);
+ offset += s.byteLength;
+ }
+ }
+ //log("serialize() result:",viewU8.slice(0,offset));
}else{
- viewSz.setInt32(0, 0, state.littleEndian);
+ viewU8[0] = 0;
}
+ metrics.s11n.serialize.time += performance.now() - t;
};
return state.s11n;
- };
+ }/*initS11n()*/;
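+ /*
+   Illustrative sketch (not part of the VFS code): a round trip
+   through the shared serialization buffer, assuming initS11n() has
+   been handed a fully-configured `state` object:
+
+     const s11n = initS11n();
+     s11n.serialize(0, "/some/file", 0n, true);
+     // ...in the counterpart thread...
+     const args = s11n.deserialize(); // => [0, "/some/file", 0n, 1]
+     s11n.serialize();                // clears the serialized state
+
+   Note that a boolean is written via setInt32(), so it deserializes
+   as 0 or 1 rather than false/true.
+ */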
/**
Generates a random ASCII string len characters long, intended for
//| capi.SQLITE_OPEN_DELETEONCLOSE
| capi.SQLITE_OPEN_MAIN_DB;
const pOut = wasm.scopedAlloc(8);
- const dbFile = "/sanity/check/file";
+ const dbFile = "/sanity/check/file"+randomFilename(8);
const zDbFile = wasm.scopedAllocCString(dbFile);
let rc;
vfsSyncWrappers.xAccess(opfsVfs.pointer, zDbFile, 0, pOut);
for(k in state.opIds){
r(metrics[k] = Object.create(null));
}
+ let s = metrics.s11n = Object.create(null);
+ s = s.serialize = Object.create(null);
+ s.count = s.time = 0;
+ s = metrics.s11n.deserialize = Object.create(null);
+ s.count = s.time = 0;
};
metrics.dump = ()=>{
let k, n = 0, t = 0, w = 0;
/*dev console can't expand this object!*/,
"\nTotal of",n,"op(s) for",t,"ms",
"approx",w,"ms spent waiting on OPFS APIs.");
+ console.log("Serialization metrics:",JSON.stringify(metrics.s11n,0,2));
};
warn("This file is very much experimental and under construction.",
const initS11n = ()=>{
// Achtung: this code is 100% duplicated in the other half of this proxy!
+
+ /**
+ Historical note: this impl was initially about 1% of this size by
+ using JSON.stringify/parse(), but fit-to-purpose serialization
+ saves considerable runtime.
+ */
+
if(state.s11n) return state.s11n;
- const jsonDecoder = new TextDecoder(),
- jsonEncoder = new TextEncoder('utf-8'),
- viewSz = new DataView(state.sabIO, state.sabS11nOffset, 4),
- viewJson = new Uint8Array(state.sabIO, state.sabS11nOffset+4, state.sabS11nSize-4);
+ const textDecoder = new TextDecoder(),
+ textEncoder = new TextEncoder('utf-8'),
+ viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
+ viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
state.s11n = Object.create(null);
+
+ const TypeIds = Object.create(null);
+ TypeIds.number = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
+ TypeIds.bigint = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
+ TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
+ TypeIds.string = { id: 4 };
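+ /* Booleans are stored via the Int32 getter/setter, so a serialized
+    boolean comes back from deserialize() as 0 or 1 rather than
+    false/true. */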
+
+ const getTypeId = (v)=>{
+ return TypeIds[typeof v] || toss("This value type cannot be serialized.",v);
+ };
+ const getTypeIdById = (tid)=>{
+ switch(tid){
+ case TypeIds.number.id: return TypeIds.number;
+ case TypeIds.bigint.id: return TypeIds.bigint;
+ case TypeIds.boolean.id: return TypeIds.boolean;
+ case TypeIds.string.id: return TypeIds.string;
+ default: toss("Invalid type ID:",tid);
+ }
+ };
+
/**
Returns an array of the state serialized by the most recent
serialize() operation (here or in the counterpart thread), or
null if the serialization buffer is empty.
*/
state.s11n.deserialize = function(){
- const sz = viewSz.getInt32(0, state.littleEndian);
- const json = sz ? jsonDecoder.decode(
- viewJson.slice(0, sz)
- /* slice() (copy) needed, instead of subarray() (reference),
- because TextDecoder throws if asked to decode from an
- SAB. */
- ) : null;
- return JSON.parse(json);
- }
+ ++metrics.s11n.deserialize.count;
+ const t = performance.now();
+ let rc = null;
+ const argc = viewU8[0];
+ if(argc){
+ rc = [];
+ let offset = 1, i, n, v, typeIds = [];
+ for(i = 0; i < argc; ++i, ++offset){
+ typeIds.push(getTypeIdById(viewU8[offset]));
+ }
+ for(i = 0; i < argc; ++i){
+ const t = typeIds[i];
+ if(t.getter){
+ v = viewDV[t.getter](offset, state.littleEndian);
+ offset += t.size;
+ }else{
+ n = viewDV.getInt32(offset, state.littleEndian);
+ offset += 4;
+ v = textDecoder.decode(viewU8.slice(offset, offset+n));
+ offset += n;
+ }
+ rc.push(v);
+ }
+ }
+ //log("deserialize:",argc, rc);
+ metrics.s11n.deserialize.time += performance.now() - t;
+ return rc;
+ };
+
/**
Serializes all arguments to the shared buffer for consumption
- by the counterpart thread. This impl currently uses JSON for
- serialization for simplicy of implementation, but if that
- proves imperformant then a lower-level approach will be
- created.
-
- If passed "too much data" (more that the shared buffer size
- it will either throw or truncate the data (not certain
- which)). This routine is only intended for serializing OPFS
- VFS arguments and (in at least one special case) result
- values, and the buffer is sized to be able to comfortably
- handle those.
+ by the counterpart thread.
+
+ This routine is only intended for serializing OPFS VFS
+ arguments and (in at least one special case) result values,
+ and the buffer is sized to be able to comfortably handle
+ those.
If passed no arguments then it zeroes out the serialization
state.
*/
state.s11n.serialize = function(...args){
+ ++metrics.s11n.serialize.count;
+ const t = performance.now();
if(args.length){
- const json = jsonEncoder.encode(JSON.stringify(args));
- viewSz.setInt32(0, json.byteLength, state.littleEndian);
- viewJson.set(json);
+ //log("serialize():",args);
+ let i = 0, offset = 1, typeIds = [];
+ viewU8[0] = args.length & 0xff;
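+ /* Only the low byte of the argument count is stored, so this scheme
+    assumes fewer than 256 arguments, which comfortably covers the
+    OPFS VFS calls it is intended for. */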
+ for(; i < args.length; ++i, ++offset){
+ typeIds.push(getTypeId(args[i]));
+ viewU8[offset] = typeIds[i].id;
+ }
+ for(i = 0; i < args.length; ++i) {
+ const t = typeIds[i];
+ if(t.setter){
+ viewDV[t.setter](offset, args[i], state.littleEndian);
+ offset += t.size;
+ }else{
+ const s = textEncoder.encode(args[i]);
+ viewDV.setInt32(offset, s.byteLength, state.littleEndian);
+ offset += 4;
+ viewU8.set(s, offset);
+ offset += s.byteLength;
+ }
+ }
+ //log("serialize() result:",viewU8.slice(0,offset));
}else{
- viewSz.setInt32(0, 0, state.littleEndian);
+ viewU8[0] = 0;
}
+ metrics.s11n.serialize.time += performance.now() - t;
};
return state.s11n;
-};
+}/*initS11n()*/;
const waitLoop = async function f(){
const opHandlers = Object.create(null);
-C OPFS\sproxy:\sremove\sone\ssanity-checking-only\slevel\sof\sproxy\sfunction\sto\sshave\soff\sa\sfew\smicroseconds.
-D 2022-09-20T10:47:36.917
+C Speed\sup\sde/serialization\sof\sfunc\sargs\sand\sreturn\svalues\sin\sthe\sOPFS\sVFS\sproxy.
+D 2022-09-20T13:25:39.815
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
F ext/wasm/api/sqlite3-api-cleanup.js 8564a6077cdcaea9a9f428a019af8a05887f0131e6a2a1e72a7ff1145fadfe77
F ext/wasm/api/sqlite3-api-glue.js 366d580c8e5bf7fcf4c6dee6f646c31f5549bd417ea03a59a0acca00e8ecce30
F ext/wasm/api/sqlite3-api-oo1.js 2d13dddf0d2b4168a9249f124134d37924331e5b55e05dba18b6d661fbeefe48
-F ext/wasm/api/sqlite3-api-opfs.js 10063ff6bc4065a27c81a71f41eb429b53390b5bc885a6305243bf83e3bd94c4
+F ext/wasm/api/sqlite3-api-opfs.js ce75aba0cbfb600cf839362012d17b7b2984aeac5189586c9a5a8f37a573a929
F ext/wasm/api/sqlite3-api-prologue.js 0d2639387b94c30f492d4aea6e44fb7b16720808678464559458fd2ae3759655
F ext/wasm/api/sqlite3-api-worker1.js ee4cf149cbacb63d06b536674f822aa5088b7e022cdffc69f1f36cebe2f9fea0
F ext/wasm/api/sqlite3-wasi.h 25356084cfe0d40458a902afb465df8c21fc4152c1d0a59b563a3fba59a068f9
F ext/wasm/split-speedtest1-script.sh a3e271938d4d14ee49105eb05567c6a69ba4c1f1293583ad5af0cd3a3779e205 x
F ext/wasm/sql/000-mandelbrot.sql 775337a4b80938ac8146aedf88808282f04d02d983d82675bd63d9c2d97a15f0
F ext/wasm/sql/001-sudoku.sql 35b7cb7239ba5d5f193bc05ec379bcf66891bce6f2a5b3879f2f78d0917299b5
-F ext/wasm/sqlite3-opfs-async-proxy.js e3c5e1b6416e9c08c713c43fa98319b06fac622ecb813f294b047072b089fba6
+F ext/wasm/sqlite3-opfs-async-proxy.js 9305d92f32d02983c4528b9c801096cfd8295ca7d24e357d90de9bbcb201d035
F ext/wasm/sqlite3-worker1-promiser.js 4fd0465688a28a75f1d4ee4406540ba494f49844e3cad0670d0437a001943365
F ext/wasm/sqlite3-worker1.js 0c1e7626304543969c3846573e080c082bf43bcaa47e87d416458af84f340a9e
F ext/wasm/test-opfs-vfs.html eb69dda21eb414b8f5e3f7c1cc0f774103cc9c0f87b2d28a33419e778abfbab5
F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
-P 88de20712bbe3e11ea74af95edc34e9ab9f346f9aa0a30931e5a9e1f96ce57b0
-R e087e3859232a8d2cb7d73aa59ed1981
+P b534831f3efb8910a17e29956e3e87cc80055ea66e15dbef992b6a556ff042f8
+R 28e069ac7c71e059841784989b21e62c
U stephan
-Z c97570003be4379736d19c807db44fd0
+Z 483126717a4194dd1997ada5cd19f259
# Remove this line to create a well-formed Fossil manifest.
-b534831f3efb8910a17e29956e3e87cc80055ea66e15dbef992b6a556ff042f8
\ No newline at end of file
+5bf235bbe035e4ace7a54851e190742528af6b4266328a1b8bbb9fb3dd7f2118
\ No newline at end of file