// blockcache_test.go
  1  package blockcache
  2  
  3  import (
  4  	"errors"
  5  	"fmt"
  6  	"sync"
  7  	"testing"
  8  
  9  	"github.com/btcsuite/btcd/btcutil"
 10  	"github.com/btcsuite/btcd/chaincfg/chainhash"
 11  	"github.com/btcsuite/btcd/wire"
 12  	"github.com/lightninglabs/neutrino"
 13  	"github.com/lightninglabs/neutrino/cache"
 14  	"github.com/stretchr/testify/require"
 15  )
 16  
 17  type mockChainBackend struct {
 18  	blocks         map[chainhash.Hash]*wire.MsgBlock
 19  	chainCallCount int
 20  
 21  	sync.RWMutex
 22  }
 23  
 24  func newMockChain() *mockChainBackend {
 25  	return &mockChainBackend{
 26  		blocks: make(map[chainhash.Hash]*wire.MsgBlock),
 27  	}
 28  }
 29  
 30  // GetBlock is a mock implementation of block fetching that tracks the number
 31  // of backend calls and returns the block found for the given hash or an error.
 32  func (m *mockChainBackend) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
 33  	m.Lock()
 34  	defer m.Unlock()
 35  
 36  	m.chainCallCount++
 37  
 38  	block, ok := m.blocks[*blockHash]
 39  	if !ok {
 40  		return nil, fmt.Errorf("block not found")
 41  	}
 42  
 43  	return block, nil
 44  }
 45  
 46  func (m *mockChainBackend) getChainCallCount() int {
 47  	m.RLock()
 48  	defer m.RUnlock()
 49  
 50  	return m.chainCallCount
 51  }
 52  
 53  func (m *mockChainBackend) addBlock(block *wire.MsgBlock, nonce uint32) {
 54  	m.Lock()
 55  	defer m.Unlock()
 56  
 57  	block.Header.Nonce = nonce
 58  	hash := block.Header.BlockHash()
 59  	m.blocks[hash] = block
 60  }
 61  
 62  func (m *mockChainBackend) resetChainCallCount() {
 63  	m.Lock()
 64  	defer m.Unlock()
 65  
 66  	m.chainCallCount = 0
 67  }
 68  
 69  // TestBlockCacheGetBlock tests that the block Cache works correctly as a LFU block
 70  // Cache for the given max capacity.
 71  func TestBlockCacheGetBlock(t *testing.T) {
 72  	mc := newMockChain()
 73  	getBlockImpl := mc.GetBlock
 74  
 75  	block1 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 1}}
 76  	block2 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 2}}
 77  	block3 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 3}}
 78  
 79  	blockhash1 := block1.BlockHash()
 80  	blockhash2 := block2.BlockHash()
 81  	blockhash3 := block3.BlockHash()
 82  
 83  	inv1 := wire.NewInvVect(wire.InvTypeWitnessBlock, &blockhash1)
 84  	inv2 := wire.NewInvVect(wire.InvTypeWitnessBlock, &blockhash2)
 85  	inv3 := wire.NewInvVect(wire.InvTypeWitnessBlock, &blockhash3)
 86  
 87  	// Determine the size of one of the blocks.
 88  	sz, _ := (&neutrino.CacheableBlock{
 89  		Block: btcutil.NewBlock(block1),
 90  	}).Size()
 91  
 92  	// A new Cache is set up with a capacity of 2 blocks
 93  	bc := NewBlockCache(2 * sz)
 94  
 95  	mc.addBlock(&wire.MsgBlock{}, 1)
 96  	mc.addBlock(&wire.MsgBlock{}, 2)
 97  	mc.addBlock(&wire.MsgBlock{}, 3)
 98  
 99  	// We expect the initial Cache to be empty
100  	require.Equal(t, 0, bc.Cache.Len())
101  
102  	// After calling getBlock for block1, it is expected that the Cache
103  	// will have a size of 1 and will contain block1. One chain backends
104  	// call is expected to fetch the block.
105  	_, err := bc.GetBlock(&blockhash1, getBlockImpl)
106  	require.NoError(t, err)
107  	require.Equal(t, 1, bc.Cache.Len())
108  	require.Equal(t, 1, mc.getChainCallCount())
109  	mc.resetChainCallCount()
110  
111  	_, err = bc.Cache.Get(*inv1)
112  	require.NoError(t, err)
113  
114  	// After calling getBlock for block2, it is expected that the Cache
115  	// will have a size of 2 and will contain both block1 and block2.
116  	// One chain backends call is expected to fetch the block.
117  	_, err = bc.GetBlock(&blockhash2, getBlockImpl)
118  	require.NoError(t, err)
119  	require.Equal(t, 2, bc.Cache.Len())
120  	require.Equal(t, 1, mc.getChainCallCount())
121  	mc.resetChainCallCount()
122  
123  	_, err = bc.Cache.Get(*inv1)
124  	require.NoError(t, err)
125  
126  	_, err = bc.Cache.Get(*inv2)
127  	require.NoError(t, err)
128  
129  	// getBlock is called again for block1 to make block2 the LFU block.
130  	// No call to the chain backend is expected since block 1 is already
131  	// in the Cache.
132  	_, err = bc.GetBlock(&blockhash1, getBlockImpl)
133  	require.NoError(t, err)
134  	require.Equal(t, 2, bc.Cache.Len())
135  	require.Equal(t, 0, mc.getChainCallCount())
136  	mc.resetChainCallCount()
137  
138  	// Since the Cache is now at its max capacity, it is expected that when
139  	// getBlock is called for a new block then the LFU block will be
140  	// evicted. It is expected that block2 will be evicted. After calling
141  	// Getblock for block3, it is expected that the Cache will have a
142  	// length of 2 and will contain block 1 and 3.
143  	_, err = bc.GetBlock(&blockhash3, getBlockImpl)
144  	require.NoError(t, err)
145  	require.Equal(t, 2, bc.Cache.Len())
146  	require.Equal(t, 1, mc.getChainCallCount())
147  	mc.resetChainCallCount()
148  
149  	_, err = bc.Cache.Get(*inv1)
150  	require.NoError(t, err)
151  
152  	_, err = bc.Cache.Get(*inv2)
153  	require.True(t, errors.Is(err, cache.ErrElementNotFound))
154  
155  	_, err = bc.Cache.Get(*inv3)
156  	require.NoError(t, err)
157  }
158  
159  // TestBlockCacheMutexes is used to test that concurrent calls to GetBlock with
160  // the same block hash does not result in multiple calls to the chain backend.
161  // In other words this tests the HashMutex.
162  func TestBlockCacheMutexes(t *testing.T) {
163  	mc := newMockChain()
164  	getBlockImpl := mc.GetBlock
165  
166  	block1 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 1}}
167  	block2 := &wire.MsgBlock{Header: wire.BlockHeader{Nonce: 2}}
168  
169  	blockhash1 := block1.BlockHash()
170  	blockhash2 := block2.BlockHash()
171  
172  	// Determine the size of the block.
173  	sz, _ := (&neutrino.CacheableBlock{
174  		Block: btcutil.NewBlock(block1),
175  	}).Size()
176  
177  	// A new Cache is set up with a capacity of 2 blocks
178  	bc := NewBlockCache(2 * sz)
179  
180  	mc.addBlock(&wire.MsgBlock{}, 1)
181  	mc.addBlock(&wire.MsgBlock{}, 2)
182  
183  	// Spin off multiple go routines and ensure that concurrent calls to the
184  	// GetBlock method does not result in multiple calls to the chain
185  	// backend.
186  	var wg sync.WaitGroup
187  	for i := 0; i < 100; i++ {
188  		wg.Add(1)
189  		go func(e int) {
190  			if e%2 == 0 {
191  				_, err := bc.GetBlock(&blockhash1, getBlockImpl)
192  				require.NoError(t, err)
193  			} else {
194  				_, err := bc.GetBlock(&blockhash2, getBlockImpl)
195  				require.NoError(t, err)
196  			}
197  
198  			wg.Done()
199  		}(i)
200  	}
201  
202  	wg.Wait()
203  	require.Equal(t, 2, mc.getChainCallCount())
204  }