-import * as duckdb from '../src/';
-import { LogLevel } from '../src/';
+import {
+    AsyncDuckDB,
+    AsyncDuckDBConnection,
+    ConsoleLogger,
+    DuckDBAccessMode,
+    DuckDBBundle,
+    DuckDBDataProtocol,
+    LogLevel
+} from '../src/';
 import * as arrow from 'apache-arrow';
 
-export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
-    let db: duckdb.AsyncDuckDB;
-    let conn: duckdb.AsyncDuckDBConnection;
+export function testOPFS(baseDir: string, bundle: () => DuckDBBundle): void {
+    const logger = new ConsoleLogger(LogLevel.ERROR);
+
+    let db: AsyncDuckDB;
+    let conn: AsyncDuckDBConnection;
 
     beforeAll(async () => {
-        removeFiles();
+        await removeFiles();
     });
 
     afterAll(async () => {
@@ -17,19 +26,18 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         if (db) {
             await db.terminate();
         }
-        removeFiles();
+        await removeFiles();
     });
 
     beforeEach(async () => {
-        removeFiles();
+        await removeFiles();
         //
-        const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
         const worker = new Worker(bundle().mainWorker!);
-        db = new duckdb.AsyncDuckDB(logger, worker);
+        db = new AsyncDuckDB(logger, worker);
         await db.instantiate(bundle().mainModule, bundle().pthreadWorker);
         await db.open({
             path: 'opfs://test.db',
-            accessMode: duckdb.DuckDBAccessMode.READ_WRITE
+            accessMode: DuckDBAccessMode.READ_WRITE
         });
         conn = await db.connect();
     });
@@ -41,12 +49,12 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         if (db) {
             await db.terminate();
         }
-        removeFiles();
+        await removeFiles();
     });
 
     describe('Load Data in OPFS', () => {
         it('Import Small Parquet file', async () => {
-            await conn.send(`CREATE TABLE stu AS SELECT * FROM "${baseDir}/uni/studenten.parquet"`);
+            await conn.send(`CREATE TABLE stu AS SELECT * FROM "${baseDir}/uni/studenten.parquet"`);
             await conn.send(`CHECKPOINT;`);
             const result = await conn.send(`SELECT matrnr FROM stu;`);
             const batches = [];
@@ -60,7 +68,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Import Large Parquet file', async () => {
-            await conn.send(`CREATE TABLE lineitem AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            await conn.send(`CREATE TABLE lineitem AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`CHECKPOINT;`);
             const result = await conn.send(`SELECT count(*)::INTEGER as cnt FROM lineitem;`);
             const batches = [];
@@ -72,18 +80,17 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Load Existing DB File', async () => {
-            await conn.send(`CREATE TABLE tmp AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            await conn.send(`CREATE TABLE tmp AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`CHECKPOINT;`);
             await conn.close();
             await db.terminate();
 
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            db = new duckdb.AsyncDuckDB(logger, worker);
+            db = new AsyncDuckDB(logger, worker);
             await db.instantiate(bundle().mainModule, bundle().pthreadWorker);
             await db.open({
                 path: 'opfs://test.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE
+                accessMode: DuckDBAccessMode.READ_WRITE
             });
             conn = await db.connect();
 
@@ -98,16 +105,16 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Load Parquet file that are already with empty handler', async () => {
             //1. write to opfs
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
+            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
             //2. handle is empty object, because worker gets a File Handle using the file name.
-            await db.registerFileHandle('test.parquet', null, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', null, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
 
@@ -122,17 +129,17 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Load Parquet file that are already with opfs file handler in datadir', async () => {
             //1. write to opfs
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const datadir = await opfsRoot.getDirectoryHandle("datadir", { create: true });
-            const fileHandle = await datadir.getFileHandle('test.parquet', { create: true });
+            const datadir = await opfsRoot.getDirectoryHandle("datadir", { create: true });
+            const fileHandle = await datadir.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
             //2. handle is opfs file handler
-            await db.registerFileHandle('test.parquet', fileHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', fileHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
 
@@ -146,16 +153,16 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('Load Parquet file that are already', async () => {
-            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
+            const parquetBuffer = await fetch(`${baseDir}/tpch/0_01/parquet/lineitem.parquet`).then(res =>
                 res.arrayBuffer(),
             );
             const opfsRoot = await navigator.storage.getDirectory();
-            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
+            const fileHandle = await opfsRoot.getFileHandle('test.parquet', { create: true });
             const writable = await fileHandle.createWritable();
             await writable.write(parquetBuffer);
             await writable.close();
 
-            await db.registerFileHandle('test.parquet', fileHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.parquet', fileHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
             await conn.send(`CREATE TABLE lineitem1 AS SELECT * FROM read_parquet('test.parquet')`);
             await conn.send(`CHECKPOINT;`);
             await conn.send(`CREATE TABLE lineitem2 AS SELECT * FROM read_parquet('test.parquet')`);
@@ -197,9 +204,9 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Drop File + Export as CSV to OPFS + Load CSV', async () => {
             const opfsRoot = await navigator.storage.getDirectory();
-            const testHandle = await opfsRoot.getFileHandle('test.csv', { create: true });
-            await db.registerFileHandle('test.csv', testHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            const testHandle = await opfsRoot.getFileHandle('test.csv', { create: true });
+            await db.registerFileHandle('test.csv', testHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'non_existing.csv'`);
             await conn.close();
@@ -208,7 +215,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await db.open({});
             conn = await db.connect();
-            await db.registerFileHandle('test.csv', testHandle, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test.csv', testHandle, DuckDBDataProtocol.BROWSER_FSACCESS, true);
 
             const result = await conn.send(`SELECT count(*)::INTEGER as cnt FROM 'test.csv';`);
             const batches = [];
@@ -224,14 +231,14 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
         it('Drop Files + Export as CSV to OPFS + Load CSV', async () => {
             const opfsRoot = await navigator.storage.getDirectory();
-            const testHandle1 = await opfsRoot.getFileHandle('test1.csv', { create: true });
-            const testHandle2 = await opfsRoot.getFileHandle('test2.csv', { create: true });
-            const testHandle3 = await opfsRoot.getFileHandle('test3.csv', { create: true });
-            await db.registerFileHandle('test1.csv', testHandle1, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test2.csv', testHandle2, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test3.csv', testHandle3, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-
-            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
+            const testHandle1 = await opfsRoot.getFileHandle('test1.csv', { create: true });
+            const testHandle2 = await opfsRoot.getFileHandle('test2.csv', { create: true });
+            const testHandle3 = await opfsRoot.getFileHandle('test3.csv', { create: true });
+            await db.registerFileHandle('test1.csv', testHandle1, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test2.csv', testHandle2, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test3.csv', testHandle3, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+
+            await conn.send(`CREATE TABLE zzz AS SELECT * FROM "${baseDir}/tpch/0_01/parquet/lineitem.parquet"`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test1.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test2.csv'`);
             await conn.send(`COPY (SELECT * FROM zzz) TO 'test3.csv'`);
@@ -242,9 +249,9 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await db.open({});
             conn = await db.connect();
-            await db.registerFileHandle('test1.csv', testHandle1, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test2.csv', testHandle2, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
-            await db.registerFileHandle('test3.csv', testHandle3, duckdb.DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test1.csv', testHandle1, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test2.csv', testHandle2, DuckDBDataProtocol.BROWSER_FSACCESS, true);
+            await db.registerFileHandle('test3.csv', testHandle3, DuckDBDataProtocol.BROWSER_FSACCESS, true);
 
             {
                 const result1 = await conn.send(`SELECT count(*)::INTEGER as cnt FROM 'test1.csv';`);
@@ -280,14 +287,14 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
     describe('Open database in OPFS', () => {
         it('should not open a non-existent DB file in read-only', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://non_existent.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_ONLY,
+                accessMode: DuckDBAccessMode.READ_ONLY,
             })).toBeRejectedWithError(Error, /file or directory could not be found/);
 
             await db_.terminate();
@@ -300,39 +307,39 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
         });
 
         it('should not open a non-existent DB file and mkdir in read-only', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://duckdb_test/path/to/non_existent.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_ONLY,
+                accessMode: DuckDBAccessMode.READ_ONLY,
             })).toBeRejectedWithError(Error, /file or directory could not be found/);
 
             await db_.terminate();
             await worker.terminate();
         });
 
         it('should open a non-existent DB file and mkdir in read-write', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             await expectAsync(db_.open({
                 path: 'opfs://duckdb_test/path/to/duck.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE,
+                accessMode: DuckDBAccessMode.READ_WRITE,
             })).toBeResolved();
 
             await db_.terminate();
             await worker.terminate();
         });
 
         it('should open a non-existent DB file in read-write and create files', async () => {
-            const logger = new duckdb.ConsoleLogger(LogLevel.ERROR);
+            const logger = new ConsoleLogger(LogLevel.ERROR);
             const worker = new Worker(bundle().mainWorker!);
-            const db_ = new duckdb.AsyncDuckDB(logger, worker);
+            const db_ = new AsyncDuckDB(logger, worker);
             await db_.instantiate(bundle().mainModule, bundle().pthreadWorker);
 
             const opfsRoot = await navigator.storage.getDirectory();
@@ -345,7 +352,7 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
             await expectAsync(db_.open({
                 path: 'opfs://non_existent_2.db',
-                accessMode: duckdb.DuckDBAccessMode.READ_WRITE,
+                accessMode: DuckDBAccessMode.READ_WRITE,
             })).toBeResolved();
 
             await db_.terminate();
@@ -359,25 +366,29 @@ export function testOPFS(baseDir: string, bundle: () => duckdb.DuckDBBundle): void {
 
     async function removeFiles() {
         const opfsRoot = await navigator.storage.getDirectory();
-        await opfsRoot.removeEntry('test.db').catch(() => {});
-        await opfsRoot.removeEntry('test.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('test.csv').catch(() => {});
-        await opfsRoot.removeEntry('test1.csv').catch(() => {});
-        await opfsRoot.removeEntry('test2.csv').catch(() => {});
-        await opfsRoot.removeEntry('test3.csv').catch(() => {});
-        await opfsRoot.removeEntry('test.parquet').catch(() => {});
+
+        await opfsRoot.removeEntry('test.db').catch(_ignore);
+        await opfsRoot.removeEntry('test.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('test.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test1.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test2.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test3.csv').catch(_ignore);
+        await opfsRoot.removeEntry('test.parquet').catch(_ignore);
         try {
             const datadir = await opfsRoot.getDirectoryHandle('datadir');
-            datadir.removeEntry('test.parquet').catch(() => {});
+            datadir.removeEntry('test.parquet').catch(_ignore);
         } catch (e) {
             //
         }
-        await opfsRoot.removeEntry('datadir').catch(() => {});
+        await opfsRoot.removeEntry('datadir').catch(_ignore);
         // In case a previous failure left files behind
-        await opfsRoot.removeEntry('non_existent.db').catch(() => {});
-        await opfsRoot.removeEntry('non_existent.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('non_existent_2.db').catch(() => {});
-        await opfsRoot.removeEntry('non_existent_2.db.wal').catch(() => {});
-        await opfsRoot.removeEntry('duckdb_test', { recursive: true }).catch(() => {});
+        await opfsRoot.removeEntry('non_existent.db').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent_2.db').catch(_ignore);
+        await opfsRoot.removeEntry('non_existent_2.db.wal').catch(_ignore);
+        await opfsRoot.removeEntry('duckdb_test', { recursive: true }).catch(_ignore);
     }
 }
+
+// No-op error handler shared by the best-effort cleanup calls in removeFiles().
+const _ignore: () => void = () => {};