// sqldb/sqlite_bench_test.go
  1  //go:build !js && !(windows && (arm || 386)) && !(linux && (ppc64 || mips || mipsle || mips64))
  2  
  3  package sqldb
  4  
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/lightningnetwork/lnd/sqldb/sqlc"
	"github.com/stretchr/testify/require"
)
 17  
 18  // BenchmarkSqliteMaxConns benchmarks sequential reads against a SQLite
 19  // database with varying MaxConnections settings.
 20  //
 21  // Run with:
 22  //
 23  //	go test -bench=BenchmarkSqliteMaxConns -benchmem -run=^$ ./sqldb/
 24  func BenchmarkSqliteMaxConns(b *testing.B) {
 25  	const numInvoices = 500
 26  
 27  	// connCounts contains the MaxConnections values we want to compare.
 28  	// 0 means "use the library default" (currently 2 for SQLite).
 29  	connCounts := []int{1, 2, 4, 8, 16, 0}
 30  
 31  	// Build a fresh SQLite database that will be shared across all
 32  	// sub-benchmarks. We insert a fixed set of invoices once and then
 33  	// execute read-only queries from multiple goroutines.
 34  	dbFileName := filepath.Join(b.TempDir(), "bench.db")
 35  
 36  	// Open the store once with migrations so the schema is in place.
 37  	setupStore, err := NewSqliteStore(&SqliteConfig{
 38  		SkipMigrations: false,
 39  	}, dbFileName)
 40  	require.NoError(b, err)
 41  
 42  	require.NoError(b, setupStore.ApplyAllMigrations(
 43  		context.Background(), GetMigrations(),
 44  	))
 45  
 46  	ctx := context.Background()
 47  
 48  	// Insert test invoices. We use a predictable hash per invoice so we
 49  	// can look them up deterministically during the benchmark.
 50  	hashes := make([][]byte, numInvoices)
 51  	for i := range numInvoices {
 52  		hash := make([]byte, 32)
 53  		hash[0] = byte(i)
 54  		hash[1] = byte(i >> 8)
 55  		hashes[i] = hash
 56  
 57  		_, err := setupStore.InsertInvoice(
 58  			ctx, sqlc.InsertInvoiceParams{
 59  				Hash:               hash,
 60  				PaymentAddr:        hash,
 61  				PaymentRequestHash: hash,
 62  				Expiry:             3600,
 63  				CreatedAt:          time.Now(),
 64  			},
 65  		)
 66  		require.NoError(b, err)
 67  	}
 68  
 69  	require.NoError(b, setupStore.DB.Close())
 70  
 71  	for _, maxConns := range connCounts {
 72  		name := fmt.Sprintf("MaxConns=%d", maxConns)
 73  		if maxConns == 0 {
 74  			name = fmt.Sprintf("MaxConns=default(%d)",
 75  				DefaultSqliteMaxConns)
 76  		}
 77  
 78  		b.Run(name, func(b *testing.B) {
 79  			store, err := NewSqliteStore(
 80  				&SqliteConfig{
 81  					SkipMigrations: true,
 82  					MaxConnections: maxConns,
 83  				}, dbFileName,
 84  			)
 85  			require.NoError(b, err)
 86  
 87  			b.Cleanup(func() {
 88  				require.NoError(b, store.DB.Close())
 89  			})
 90  
 91  			var i int
 92  			for b.Loop() {
 93  				hash := hashes[i%numInvoices]
 94  				i++
 95  
 96  				_, err := store.GetInvoiceByHash(ctx, hash)
 97  				if err != nil {
 98  					require.ErrorIs(b, err, sql.ErrNoRows)
 99  				}
100  			}
101  		})
102  	}
103  }
104  
105  // BenchmarkSqliteMaxConnsConcurrentReads measures aggregate read throughput
106  // for a fixed level of goroutine concurrency to complement the sequential
107  // benchmark above. Each iteration launches a fixed number of goroutines that
108  // all issue reads simultaneously, directly stressing the connection pool.
109  func BenchmarkSqliteMaxConnsConcurrentReads(b *testing.B) {
110  	const (
111  		numInvoices = 500
112  		goroutines  = 16
113  	)
114  
115  	connCounts := []int{1, 2, 4, 8, 16, 0}
116  
117  	dbFileName := filepath.Join(b.TempDir(), "bench_conc.db")
118  
119  	setupStore, err := NewSqliteStore(&SqliteConfig{
120  		SkipMigrations: false,
121  	}, dbFileName)
122  	require.NoError(b, err)
123  
124  	require.NoError(b, setupStore.ApplyAllMigrations(
125  		context.Background(), GetMigrations(),
126  	))
127  
128  	ctx := context.Background()
129  
130  	hashes := make([][]byte, numInvoices)
131  	for i := range numInvoices {
132  		hash := make([]byte, 32)
133  		hash[0] = byte(i)
134  		hash[1] = byte(i >> 8)
135  		hashes[i] = hash
136  
137  		_, err := setupStore.InsertInvoice(
138  			ctx, sqlc.InsertInvoiceParams{
139  				Hash:               hash,
140  				PaymentAddr:        hash,
141  				PaymentRequestHash: hash,
142  				Expiry:             3600,
143  				CreatedAt:          time.Now(),
144  			},
145  		)
146  		require.NoError(b, err)
147  	}
148  
149  	require.NoError(b, setupStore.DB.Close())
150  
151  	for _, maxConns := range connCounts {
152  		name := fmt.Sprintf("MaxConns=%d", maxConns)
153  		if maxConns == 0 {
154  			name = fmt.Sprintf("MaxConns=default(%d)",
155  				DefaultSqliteMaxConns)
156  		}
157  
158  		b.Run(name, func(b *testing.B) {
159  			store, err := NewSqliteStore(
160  				&SqliteConfig{
161  					SkipMigrations: true,
162  					MaxConnections: maxConns,
163  				}, dbFileName,
164  			)
165  			require.NoError(b, err)
166  
167  			b.Cleanup(func() {
168  				require.NoError(b, store.DB.Close())
169  			})
170  
171  			for b.Loop() {
172  				var wg sync.WaitGroup
173  				wg.Add(goroutines)
174  
175  				for g := range goroutines {
176  					go func() {
177  						defer wg.Done()
178  
179  						hash := hashes[g%numInvoices]
180  						_, err := store.GetInvoiceByHash(
181  							ctx, hash,
182  						)
183  						if err != nil &&
184  							err != sql.ErrNoRows {
185  
186  							b.Errorf("GetInvoice:"+
187  								" %v", err)
188  						}
189  					}()
190  				}
191  
192  				wg.Wait()
193  			}
194  		})
195  	}
196  }