// tests/utils/sqlite-concurrency.test.js
  1  /**
  2   * SQLite Concurrency Safety & Backup Integrity Tests
  3   *
  4   * Context: On 2026-03-20, 249K sites were lost due to zero-sized backups.
  5   * SQLite BUSY errors under concurrent access are a known risk.
  6   *
  7   * These tests verify:
  8   * 1. The DB connection is configured with busy_timeout and WAL mode
  9   * 2. Two concurrent write transactions both succeed (no SQLITE_BUSY crash)
 10   * 3. integrity_check passes after concurrent writes
 11   * 4. Backup via better-sqlite3 produces a valid, non-zero SQLite file
 12   * 5. Backup file passes integrity_check
 13   * 6. Backup during concurrent writes still produces a valid file
 14   *
 15   * Uses real SQLite files in /tmp/ — in-memory DBs cannot test file-level locking.
 16   */
 17  
 18  import { test, describe, before, after } from 'node:test';
 19  import assert from 'node:assert/strict';
 20  import Database from 'better-sqlite3';
 21  import { createPgMock } from '../helpers/pg-mock.js'; // eslint-disable-line no-unused-vars
 22  import { existsSync, unlinkSync, statSync, readdirSync } from 'fs';
 23  import { join } from 'path';
 24  import { randomUUID } from 'crypto';
 25  
 26  // ─── Helpers ─────────────────────────────────────────────────────────────────
 27  
 28  const TEST_PREFIX = `/tmp/sqlite-concurrency-test-${Date.now()}`;
 29  const cleanupPaths = [];
 30  
 31  function tempDbPath(label) {
 32    const p = `${TEST_PREFIX}-${label}-${randomUUID().slice(0, 8)}.db`;
 33    cleanupPaths.push(p);
 34    return p;
 35  }
 36  
 37  /**
 38   * Open a DB with production-equivalent settings (mirrors src/utils/db.js).
 39   */
 40  function openDb(dbPath) {
 41    const db = new Database(dbPath);
 42    db.pragma('foreign_keys = ON');
 43    db.pragma('journal_mode = WAL');
 44    db.pragma('busy_timeout = 15000');
 45    db.pragma('wal_autocheckpoint = 4000');
 46    return db;
 47  }
 48  
 49  /**
 50   * Create a test table and seed it with rows.
 51   */
 52  function seedTable(db, tableName = 'items', rowCount = 100) {
 53    db.exec(`
 54      CREATE TABLE IF NOT EXISTS ${tableName} (
 55        id INTEGER PRIMARY KEY AUTOINCREMENT,
 56        value TEXT NOT NULL,
 57        counter INTEGER DEFAULT 0,
 58        created_at TEXT DEFAULT (datetime('now'))
 59      )
 60    `);
 61    const insert = db.prepare(`INSERT INTO ${tableName} (value) VALUES (?)`);
 62    const insertMany = db.transaction((rows) => {
 63      for (const row of rows) insert.run(row);
 64    });
 65    const rows = Array.from({ length: rowCount }, (_, i) => `seed-${i}`);
 66    insertMany(rows);
 67    return rowCount;
 68  }
 69  
 70  after(() => {
 71    for (const p of cleanupPaths) {
 72      for (const suffix of ['', '-wal', '-shm', '-journal']) {
 73        try { if (existsSync(p + suffix)) unlinkSync(p + suffix); } catch { /* cleanup */ }
 74      }
 75    }
 76    // Clean any leftover backup files
 77    try {
 78      const tmpFiles = readdirSync('/tmp').filter(f => f.startsWith('sqlite-concurrency-test-'));
 79      for (const f of tmpFiles) {
 80        try { unlinkSync(join('/tmp', f)); } catch { /* cleanup */ }
 81      }
 82    } catch { /* cleanup */ }
 83  });
 84  
 85  // ─── Connection Configuration Tests ──────────────────────────────────────────
 86  
 87  describe('SQLite connection configuration', () => {
 88  
 89    test('busy_timeout is set to 15000ms on the connection', () => {
 90      const dbPath = tempDbPath('busytimeout');
 91      const db = openDb(dbPath);
 92      try {
 93        // PRAGMA busy_timeout returns { timeout: N }, not { busy_timeout: N }
 94        const row = db.prepare('PRAGMA busy_timeout').get();
 95        assert.equal(row.timeout, 15000, 'busy_timeout should be 15000ms');
 96      } finally {
 97        db.close();
 98      }
 99    });
100  
101    test('WAL journal mode is active', () => {
102      const dbPath = tempDbPath('walmode');
103      const db = openDb(dbPath);
104      try {
105        const row = db.prepare('PRAGMA journal_mode').get();
106        assert.equal(row.journal_mode, 'wal', 'journal_mode should be WAL');
107      } finally {
108        db.close();
109      }
110    });
111  
112    test('foreign_keys are enabled', () => {
113      const dbPath = tempDbPath('fk');
114      const db = openDb(dbPath);
115      try {
116        const row = db.prepare('PRAGMA foreign_keys').get();
117        assert.equal(row.foreign_keys, 1, 'foreign_keys should be ON');
118      } finally {
119        db.close();
120      }
121    });
122  
123    test('wal_autocheckpoint is set to 4000', () => {
124      const dbPath = tempDbPath('walcheckpoint');
125      const db = openDb(dbPath);
126      try {
127        const row = db.prepare('PRAGMA wal_autocheckpoint').get();
128        assert.equal(row.wal_autocheckpoint, 4000, 'wal_autocheckpoint should be 4000');
129      } finally {
130        db.close();
131      }
132    });
133  
134    test('WAL file is created on disk after writes', () => {
135      const dbPath = tempDbPath('walfile');
136      const db = openDb(dbPath);
137      try {
138        seedTable(db, 'waltest', 10);
139        // WAL file should exist on disk
140        assert.ok(existsSync(`${dbPath  }-wal`), 'WAL file should exist after writes');
141      } finally {
142        db.close();
143      }
144    });
145  });
146  
147  // ─── Concurrent Write Tests ──────────────────────────────────────────────────
148  
describe('Concurrent write safety', () => {

  // NOTE: better-sqlite3 is fully synchronous, so this callback has no awaits
  // and is deliberately NOT async (the original `async` here was dead weight).
  test('two connections writing concurrently both succeed', () => {
    const dbPath = tempDbPath('concurrent-writes');
    // Set up schema with connection 1
    const setup = openDb(dbPath);
    seedTable(setup, 'concurrent_test', 0);
    setup.close();

    const WRITES_PER_CONN = 200;

    // Open two separate connections to the same file
    const db1 = openDb(dbPath);
    const db2 = openDb(dbPath);

    const insert1 = db1.prepare('INSERT INTO concurrent_test (value) VALUES (?)');
    const insert2 = db2.prepare('INSERT INTO concurrent_test (value) VALUES (?)');

    try {
      // Run writes from both connections interleaved
      // better-sqlite3 is synchronous, so we alternate writes to simulate contention
      const errors = [];
      for (let i = 0; i < WRITES_PER_CONN; i++) {
        try { insert1.run(`conn1-${i}`); } catch (e) { errors.push(`conn1-${i}: ${e.message}`); }
        try { insert2.run(`conn2-${i}`); } catch (e) { errors.push(`conn2-${i}: ${e.message}`); }
      }

      assert.equal(errors.length, 0, `Expected zero BUSY errors, got: ${errors.join('; ')}`);

      // Verify all rows landed
      const count = db1.prepare('SELECT COUNT(*) as n FROM concurrent_test').get().n;
      assert.equal(count, WRITES_PER_CONN * 2, `All ${WRITES_PER_CONN * 2} rows should be present`);
    } finally {
      db1.close();
      db2.close();
    }
  });

  test('two connections with competing transactions both commit', () => {
    const dbPath = tempDbPath('concurrent-txn');
    const setup = openDb(dbPath);
    seedTable(setup, 'txn_test', 50);
    setup.close();

    const db1 = openDb(dbPath);
    const db2 = openDb(dbPath);

    try {
      // Connection 1: batch update in a transaction (rows id <= 25)
      const update1 = db1.transaction(() => {
        db1.prepare('UPDATE txn_test SET counter = counter + 1 WHERE id <= 25').run();
        return db1.prepare('SELECT SUM(counter) as total FROM txn_test WHERE id <= 25').get().total;
      });

      // Connection 2: batch update in a transaction (disjoint rows: id > 25 —
      // the original comment claimed "overlapping", but the WHERE clauses
      // partition the table)
      const update2 = db2.transaction(() => {
        db2.prepare('UPDATE txn_test SET counter = counter + 10 WHERE id > 25').run();
        return db2.prepare('SELECT SUM(counter) as total FROM txn_test WHERE id > 25').get().total;
      });

      // Both transactions should succeed — WAL + busy_timeout handles the lock contention
      const total1 = update1();
      const total2 = update2();

      assert.ok(total1 > 0, 'Transaction 1 should have updated rows');
      assert.ok(total2 > 0, 'Transaction 2 should have updated rows');

      // Verify from a third connection: totals are additive because the
      // two updates touched disjoint row sets.
      const verify = openDb(dbPath);
      const row = verify.prepare('SELECT SUM(counter) as total FROM txn_test').get();
      assert.equal(row.total, total1 + total2, 'Sum of all counters should match both transactions');
      verify.close();
    } finally {
      db1.close();
      db2.close();
    }
  });

  test('rapid alternating writes from multiple connections', () => {
    const dbPath = tempDbPath('rapid-alt');
    const setup = openDb(dbPath);
    seedTable(setup, 'rapid_test', 0);
    setup.close();

    const NUM_CONNS = 4;
    const WRITES_EACH = 100;
    const conns = [];

    try {
      for (let c = 0; c < NUM_CONNS; c++) {
        conns.push(openDb(dbPath));
      }

      const stmts = conns.map(db => db.prepare('INSERT INTO rapid_test (value) VALUES (?)'));
      const errors = [];

      // Round-robin across all connections to maximize lock churn
      for (let i = 0; i < WRITES_EACH; i++) {
        for (let c = 0; c < NUM_CONNS; c++) {
          try {
            stmts[c].run(`conn${c}-row${i}`);
          } catch (e) {
            errors.push(`conn${c}-row${i}: ${e.message}`);
          }
        }
      }

      assert.equal(errors.length, 0, `Expected zero errors, got ${errors.length}: ${errors.slice(0, 5).join('; ')}`);

      const count = conns[0].prepare('SELECT COUNT(*) as n FROM rapid_test').get().n;
      assert.equal(count, NUM_CONNS * WRITES_EACH, `All ${NUM_CONNS * WRITES_EACH} rows should exist`);
    } finally {
      for (const db of conns) db.close();
    }
  });

  test('integrity_check passes after concurrent writes', () => {
    const dbPath = tempDbPath('integrity-post');
    const setup = openDb(dbPath);
    seedTable(setup, 'integrity_test', 500);
    setup.close();

    // Hammer it with two connections
    const db1 = openDb(dbPath);
    const db2 = openDb(dbPath);
    const ins1 = db1.prepare('INSERT INTO integrity_test (value) VALUES (?)');
    const ins2 = db2.prepare('INSERT INTO integrity_test (value) VALUES (?)');

    for (let i = 0; i < 300; i++) {
      ins1.run(`post-integrity-a-${i}`);
      ins2.run(`post-integrity-b-${i}`);
    }
    db1.close();
    db2.close();

    // Now verify integrity from a fresh connection
    const verify = openDb(dbPath);
    try {
      const result = verify.prepare('PRAGMA integrity_check').get();
      assert.equal(result.integrity_check, 'ok', 'Database should pass integrity_check after concurrent writes');
    } finally {
      verify.close();
    }
  });

  test('BUSY error without busy_timeout set', () => {
    // Demonstrates that without busy_timeout, lock contention causes errors.
    // This validates that our busy_timeout setting is actually necessary.
    const dbPath = tempDbPath('no-timeout');
    const setup = new Database(dbPath);
    setup.pragma('journal_mode = WAL');
    // Explicitly set busy_timeout to 0 (no wait)
    setup.pragma('busy_timeout = 0');
    setup.exec(`
      CREATE TABLE no_timeout_test (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        value TEXT
      )
    `);
    setup.close();

    const db1 = new Database(dbPath);
    db1.pragma('journal_mode = WAL');
    db1.pragma('busy_timeout = 0');

    const db2 = new Database(dbPath);
    db2.pragma('journal_mode = WAL');
    db2.pragma('busy_timeout = 0');

    try {
      // Start an exclusive write transaction on db1 that we hold open
      db1.exec('BEGIN IMMEDIATE');
      db1.exec("INSERT INTO no_timeout_test (value) VALUES ('from-db1')");

      // db2 should fail immediately because db1 holds the write lock
      // and busy_timeout is 0
      let gotBusy = false;
      try {
        db2.exec('BEGIN IMMEDIATE');
      } catch (e) {
        if (e.message.includes('SQLITE_BUSY') || e.code === 'SQLITE_BUSY') {
          gotBusy = true;
        }
      }

      assert.ok(gotBusy, 'Should get SQLITE_BUSY when busy_timeout is 0 and write lock is held');

      db1.exec('COMMIT');
    } finally {
      try { db1.exec('ROLLBACK'); } catch { /* may already be committed */ }
      db1.close();
      db2.close();
    }
  });

  test('busy_timeout allows second connection to wait and succeed', () => {
    // NOTE(review): because better-sqlite3 is synchronous and single-threaded,
    // db1's transaction commits before db2 ever starts, so this test never
    // actually exercises a blocking wait — it only confirms sequential
    // transactions both succeed under our pragma settings. A true wait would
    // need a second process/worker holding the lock.
    const dbPath = tempDbPath('timeout-wait');
    const setup = openDb(dbPath);
    setup.exec(`
      CREATE TABLE timeout_test (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        value TEXT
      )
    `);
    setup.close();

    const db1 = openDb(dbPath);
    const db2 = openDb(dbPath);

    try {
      // db1 starts a write transaction
      db1.exec('BEGIN IMMEDIATE');
      db1.exec("INSERT INTO timeout_test (value) VALUES ('held-by-db1')");
      db1.exec('COMMIT');

      // db2 should now succeed (lock released)
      db2.exec('BEGIN IMMEDIATE');
      db2.exec("INSERT INTO timeout_test (value) VALUES ('from-db2')");
      db2.exec('COMMIT');

      const count = db1.prepare('SELECT COUNT(*) as n FROM timeout_test').get().n;
      assert.equal(count, 2, 'Both rows should be present');
    } finally {
      db1.close();
      db2.close();
    }
  });
});
376  
377  // ─── Backup Integrity Tests ─────────────────────────────────────────────────
378  
describe('Backup integrity', () => {

  test('backup produces a valid SQLite file', async () => {
    const dbPath = tempDbPath('backup-valid');
    const backupPath = tempDbPath('backup-valid-copy');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'backup_test', 200);

      // Use better-sqlite3 backup API (same as cron.js backupDatabase)
      await db.backup(backupPath);

      // Verify backup file exists and is valid SQLite
      assert.ok(existsSync(backupPath), 'Backup file should exist');

      // Opening read-only proves the file parses as a SQLite database
      const backupDb = new Database(backupPath, { readonly: true });
      const row = backupDb.prepare('SELECT COUNT(*) as n FROM backup_test').get();
      assert.equal(row.n, 200, 'Backup should contain all 200 rows');
      backupDb.close();
    } finally {
      db.close();
    }
  });

  test('backup file is non-zero size', async () => {
    const dbPath = tempDbPath('backup-size');
    const backupPath = tempDbPath('backup-size-copy');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'size_test', 500);
      await db.backup(backupPath);

      // Guards against the 2026-03-20 incident class: zero-byte backup files
      const stats = statSync(backupPath);
      assert.ok(stats.size > 0, `Backup file should be non-zero, got ${stats.size} bytes`);
      // With 500 rows of text data, should be at least a few KB
      assert.ok(stats.size > 4096, `Backup should be at least 4KB, got ${stats.size} bytes`);
    } finally {
      db.close();
    }
  });

  test('backup file passes integrity_check', async () => {
    const dbPath = tempDbPath('backup-integrity');
    const backupPath = tempDbPath('backup-integrity-copy');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'integrity_backup', 300);
      // Add some indexes to make the integrity check more meaningful
      db.exec('CREATE INDEX idx_integrity_value ON integrity_backup(value)');
      db.exec('CREATE INDEX idx_integrity_counter ON integrity_backup(counter)');

      await db.backup(backupPath);

      const backupDb = new Database(backupPath, { readonly: true });
      const result = backupDb.prepare('PRAGMA integrity_check').get();
      assert.equal(result.integrity_check, 'ok', 'Backup should pass integrity_check');
      backupDb.close();
    } finally {
      db.close();
    }
  });

  test('backup row count matches source', async () => {
    const dbPath = tempDbPath('backup-count');
    const backupPath = tempDbPath('backup-count-copy');
    const db = openDb(dbPath);

    try {
      const expected = seedTable(db, 'count_test', 1000);
      await db.backup(backupPath);

      const liveCount = db.prepare('SELECT COUNT(*) as n FROM count_test').get().n;
      const backupDb = new Database(backupPath, { readonly: true });
      const backupCount = backupDb.prepare('SELECT COUNT(*) as n FROM count_test').get().n;
      backupDb.close();

      assert.equal(liveCount, expected, 'Live DB should have expected row count');
      assert.equal(backupCount, expected, 'Backup should have same row count as live');
      assert.equal(backupCount, liveCount, 'Backup count must match live count');
    } finally {
      db.close();
    }
  });

  test('backup during concurrent writes produces a valid file', async () => {
    const dbPath = tempDbPath('backup-during-writes');
    const backupPath = tempDbPath('backup-during-writes-copy');

    // Set up the DB with initial data
    const setup = openDb(dbPath);
    seedTable(setup, 'concurrent_backup', 500);
    setup.close();

    // Open a writer connection that will keep writing during backup
    const writer = openDb(dbPath);
    const insertStmt = writer.prepare('INSERT INTO concurrent_backup (value) VALUES (?)');

    // Open the source connection for backup
    const source = openDb(dbPath);

    try {
      // Start writing in a tight loop while backup runs
      let writesCompleted = 0;
      let writeError = null;

      // The backup API in better-sqlite3 supports a progress callback.
      // We write rows between backup pages to simulate concurrent access.
      // (The callback's page-count argument is intentionally unused here.)
      const backupPromise = source.backup(backupPath, {
        progress() {
          // Write some rows between backup steps
          try {
            for (let j = 0; j < 5; j++) {
              insertStmt.run(`during-backup-${writesCompleted}`);
              writesCompleted++;
            }
          } catch (e) {
            // Capture but don't fail the backup — BUSY errors during backup
            // are handled by the busy_timeout retry
            if (!e.message.includes('SQLITE_BUSY')) {
              writeError = e;
            }
          }
          return 100; // Process 100 pages per step
        },
      });

      await backupPromise;

      // Writes during backup should have mostly succeeded
      assert.ok(writesCompleted > 0, `Should have completed writes during backup, got ${writesCompleted}`);
      assert.equal(writeError, null, `Should have no non-BUSY write errors: ${writeError?.message}`);

      // Backup file must exist and be non-zero
      assert.ok(existsSync(backupPath), 'Backup file should exist');
      const stats = statSync(backupPath);
      assert.ok(stats.size > 0, `Backup should be non-zero, got ${stats.size}`);

      // Backup must pass integrity check
      const backupDb = new Database(backupPath, { readonly: true });
      const integrity = backupDb.prepare('PRAGMA integrity_check').get();
      assert.equal(integrity.integrity_check, 'ok', 'Backup created during writes should pass integrity_check');

      // Backup should contain at least the original seeded rows
      const backupCount = backupDb.prepare('SELECT COUNT(*) as n FROM concurrent_backup').get().n;
      assert.ok(backupCount >= 500, `Backup should have at least 500 rows (original seed), got ${backupCount}`);
      backupDb.close();
    } finally {
      writer.close();
      source.close();
    }
  });

  test('backup with progress callback and timeout mimics production pattern', async () => {
    const dbPath = tempDbPath('backup-prod-pattern');
    const backupPath = tempDbPath('backup-prod-pattern-copy');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'prod_pattern', 1000);

      // Mirrors the pattern from cron.js backupDatabase, but with the correct
      // abort mechanism (throw instead of return 0 — see the backup abort test below).
      const BACKUP_TIMEOUT_MS = 10_000; // 10s for test
      const backupStart = Date.now();
      let lastLoggedPct = -1;
      let progressCalls = 0;

      await db.backup(backupPath, {
        progress({ totalPages, remainingPages }) {
          progressCalls++;
          const elapsed = Date.now() - backupStart;
          if (elapsed > BACKUP_TIMEOUT_MS) {
            throw new Error('BACKUP_TIMEOUT'); // throw, not return 0
          }
          // Track progress in 10% steps (production logs here; the test only
          // records the percentage to keep the callback shape identical)
          const pct = Math.floor(((totalPages - remainingPages) / totalPages) * 100);
          if (pct >= lastLoggedPct + 10 || remainingPages < 100) {
            lastLoggedPct = pct;
          }
          return 5000; // pages per step, same as production
        },
      });

      assert.ok(progressCalls > 0, 'Progress callback should have been called');

      // Verify the backup
      const backupDb = new Database(backupPath, { readonly: true });
      const count = backupDb.prepare('SELECT COUNT(*) as n FROM prod_pattern').get().n;
      assert.equal(count, 1000, 'Backup should contain all rows');

      const integrity = backupDb.prepare('PRAGMA integrity_check').get();
      assert.equal(integrity.integrity_check, 'ok', 'Production-pattern backup should pass integrity_check');
      backupDb.close();
    } finally {
      db.close();
    }
  });

  test('backup abort via throw rejects the promise and stops', async () => {
    // IMPORTANT FINDING: returning 0 from progress does NOT abort the backup —
    // it spins infinitely calling progress with 0 pages per step.
    // The ONLY way to abort a better-sqlite3 backup is to throw from progress.
    // See cron.js backupDatabase (line ~707): `return 0` is a bug that causes
    // infinite spin on timeout. The fix is to throw instead.
    const dbPath = tempDbPath('backup-abort');
    const backupPath = tempDbPath('backup-abort-copy');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'abort_test', 500);

      let progressCalls = 0;
      const abortError = new Error('BACKUP_TIMEOUT');

      await assert.rejects(
        () => db.backup(backupPath, {
          progress() {
            progressCalls++;
            // Throwing is the only way to abort a better-sqlite3 backup
            throw abortError;
          },
        }),
        (err) => err === abortError,
        'Backup should reject with the thrown error',
      );

      assert.equal(progressCalls, 1, 'Progress should have been called exactly once before abort');

      // Clean up any partial file (mimics cron.js cleanup pattern)
      if (existsSync(backupPath)) {
        unlinkSync(backupPath);
      }
    } finally {
      db.close();
    }
  });
});
618  
619  // ─── WAL Checkpoint Tests ────────────────────────────────────────────────────
620  
describe('WAL checkpoint behavior', () => {

  test('PASSIVE checkpoint does not block writers', () => {
    const dbPath = tempDbPath('checkpoint-passive');
    const db = openDb(dbPath);

    try {
      seedTable(db, 'checkpoint_test', 100);

      // Run a passive checkpoint (same as cron.js pre-backup step)
      const checkpointRows = db.pragma('wal_checkpoint(PASSIVE)');
      assert.ok(Array.isArray(checkpointRows) && checkpointRows.length > 0, 'Checkpoint should return a result');

      // A PASSIVE checkpoint must leave the connection writable
      const insert = db.prepare('INSERT INTO checkpoint_test (value) VALUES (?)');
      assert.doesNotThrow(() => insert.run('after-checkpoint'), 'Should be able to write after PASSIVE checkpoint');

      const { n } = db.prepare('SELECT COUNT(*) as n FROM checkpoint_test').get();
      assert.equal(n, 101, 'Should have original rows plus the post-checkpoint row');
    } finally {
      db.close();
    }
  });

  test('TRUNCATE checkpoint resets WAL file', () => {
    const dbPath = tempDbPath('checkpoint-truncate');
    const db = openDb(dbPath);
    const walPath = `${dbPath}-wal`;

    try {
      seedTable(db, 'truncate_test', 200);

      // WAL should have data before the checkpoint
      assert.ok(existsSync(walPath), 'WAL file should exist before checkpoint');

      // TRUNCATE checkpoint: moves all WAL data into main DB and truncates WAL
      db.pragma('wal_checkpoint(TRUNCATE)');

      // WAL file may still exist but should be empty (0 bytes) or very small
      if (existsSync(walPath)) {
        const walSize = statSync(walPath).size;
        // After TRUNCATE, WAL should be reset (typically 0 bytes or just the header)
        assert.ok(walSize <= 4152, `WAL should be near-empty after TRUNCATE, got ${walSize} bytes`);
      }

      // Data must remain fully readable after the checkpoint
      const { n } = db.prepare('SELECT COUNT(*) as n FROM truncate_test').get();
      assert.equal(n, 200, 'All rows should be accessible after TRUNCATE checkpoint');
    } finally {
      db.close();
    }
  });
});
675  
676  // ─── createDatabaseConnection integration ────────────────────────────────────
677  // NOTE: These tests were written for the SQLite era of db.js. After the PG migration,
678  // createDatabaseConnection returns a PG pool (not a SQLite Database), so WAL/busy_timeout
679  // pragmas no longer apply. Tests are skipped to preserve test history.
680  
describe('createDatabaseConnection concurrency settings', () => {
  // All four tests share the same skip reason; register them from a list.
  const SKIP_REASON = 'SQLite-era test: db.js now uses PostgreSQL pool, not SQLite WAL';
  const retiredTests = [
    'production connection has busy_timeout = 15000',
    'production connection uses WAL mode',
    'production connection has wal_autocheckpoint = 4000',
    'two production connections can write concurrently',
  ];
  for (const name of retiredTests) {
    test(name, { skip: SKIP_REASON }, () => {});
  }
});