Use COALESCE to avoid possible NULL values from database
prathamesh0 committed May 11, 2022
1 parent ecba34c commit 6ae7719
Showing 4 changed files with 23 additions and 33 deletions.
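
Why the COALESCE wrappers: database/sql (and therefore the sqlx Get/Select calls used throughout these tests) cannot scan a SQL NULL into a plain Go string destination and returns a scan error rather than an empty value, so nullable columns such as log_cids.topic1 or storage_cids.storage_path can break the assertions even when the indexed data is fine. Wrapping those columns in COALESCE(col, '') on the SQL side avoids that without switching the test models to sql.NullString or pointer fields. The file-indexer queries also pick up the block_number = $2 filter already used by the shared SQL test queries. Below is a minimal standalone sketch of the failure mode and the fix, not code from this repository — the DSN and the receipt CID are placeholders, and the exact error text may vary by driver:

package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // Postgres driver
)

func main() {
	// Placeholder DSN; point it at the database the indexer tests run against.
	db, err := sqlx.Connect("postgres", "postgres://user:pass@localhost:5432/vulcanize_testing?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rctCID := "placeholder-receipt-leaf-cid" // stand-in for a real receipt_cids.leaf_cid
	var topic string

	// Without COALESCE: if topic1 is NULL on the matched row, Get returns a scan
	// error (e.g. "converting NULL to string is unsupported") instead of "".
	err = db.Get(&topic,
		`SELECT topic1 FROM eth.log_cids
		 INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
		 WHERE receipt_cids.leaf_cid = $1 ORDER BY log_cids.index ASC LIMIT 1`, rctCID)
	if err != nil {
		fmt.Println("plain select failed:", err)
	}

	// With COALESCE: NULL comes back as '', so scanning into a plain string succeeds
	// and the caller simply sees an empty topic.
	err = db.Get(&topic,
		`SELECT COALESCE(topic1, '') as topic1 FROM eth.log_cids
		 INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
		 WHERE receipt_cids.leaf_cid = $1 ORDER BY log_cids.index ASC LIMIT 1`, rctCID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("topic1: %q\n", topic)
}

The Go-side alternative would be sql.NullString or pointer fields on the test models; doing it in SQL keeps the existing struct definitions and assertions untouched.
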
28 changes: 14 additions & 14 deletions statediff/indexer/database/file/indexer_test.go
@@ -51,8 +51,8 @@ var (
sqlxdb *sqlx.DB
err error
ind interfaces.StateDiffIndexer
-ipfsPgGet = `SELECT data FROM public.blocks
-WHERE key = $1`
+ipfsPgGet = `SELECT COALESCE(data, '') as data FROM public.blocks
+WHERE key = $1 AND block_number = $2`
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
mockBlock *types.Block
headerCID, trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
@@ -262,7 +262,7 @@ func TestFileIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -301,7 +301,7 @@ func TestFileIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -425,7 +425,7 @@ func TestFileIndexer(t *testing.T) {
}
for i := range rcts {
results := make([]logIPLD, 0)
-pgStr = `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+pgStr = `SELECT log_cids.index, log_cids.address, COALESCE(log_cids.topic0, '') as topic0, COALESCE(log_cids.topic1, '') as topic1, data FROM eth.log_cids
INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
@@ -480,7 +480,7 @@ func TestFileIndexer(t *testing.T) {

for idx, c := range rcts {
result := make([]models.IPLDModel, 0)
-pgStr = `SELECT data
+pgStr = `SELECT COALESCE(data, '') as data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
WHERE receipt_cids.leaf_cid = $1`
@@ -506,7 +506,7 @@ func TestFileIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
var data []byte
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -580,7 +580,7 @@ func TestFileIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -641,7 +641,7 @@ func TestFileIndexer(t *testing.T) {
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
require.Equal(t, shared.RemovedNodeMhKey, prefixedKey)
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
@@ -668,7 +668,7 @@ func TestFileIndexer(t *testing.T) {

// check that storage nodes were properly indexed
storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
@@ -694,15 +694,15 @@ func TestFileIndexer(t *testing.T) {
}
mhKey := dshelp.MultihashToDsKey(dc.Hash())
prefixedKey := blockstore.BlockPrefix.String() + mhKey.String()
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
require.Equal(t, mocks.StorageLeafNode, data)

// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
@@ -747,8 +747,8 @@ func TestFileIndexer(t *testing.T) {
}
mhKey = dshelp.MultihashToDsKey(dc.Hash())
prefixedKey = blockstore.BlockPrefix.String() + mhKey.String()
-require.Equal(t, shared.RemovedNodeMhKey, prefixedKey)
-err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey)
+require.Equal(t, shared.RemovedNodeMhKey, prefixedKey, mocks.BlockNumber.Uint64())
+err = sqlxdb.Get(&data, ipfsPgGet, prefixedKey, mocks.BlockNumber.Uint64())
if err != nil {
t.Fatal(err)
}
2 changes: 1 addition & 1 deletion statediff/indexer/database/sql/indexer_shared_test.go
@@ -22,7 +22,7 @@ var (
db sql.Database
err error
ind interfaces.StateDiffIndexer
-ipfsPgGet = `SELECT data FROM public.blocks
+ipfsPgGet = `SELECT COALESCE(data, '') as data FROM public.blocks
WHERE key = $1 AND block_number = $2`
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
mockBlock *types.Block
18 changes: 4 additions & 14 deletions statediff/indexer/database/sql/pgx_indexer_test.go
@@ -21,8 +21,6 @@ import (
"math/big"
"testing"

"github.com/lib/pq"

"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
@@ -207,14 +205,6 @@ func TestPGXIndexer(t *testing.T) {
if txRes.Value != transactions[3].Value().String() {
t.Fatalf("expected tx value %s got %s", transactions[3].Value().String(), txRes.Value)
}
-// AccessListElementModel is the db model for eth.access_list_entry
-type AccessListElementModel struct {
-	BlockNumber string         `db:"block_number"`
-	Index       int64          `db:"index"`
-	TxID        string         `db:"tx_id"`
-	Address     string         `db:"address"`
-	StorageKeys pq.StringArray `db:"storage_keys"`
-}
accessListElementModels := make([]models.AccessListElementModel, 0)
pgStr = "SELECT cast(access_list_elements.block_number AS TEXT), access_list_elements.index, access_list_elements.tx_id, " +
"access_list_elements.address, access_list_elements.storage_keys FROM eth.access_list_elements " +
@@ -267,7 +257,7 @@ func TestPGXIndexer(t *testing.T) {
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
-logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+logsPgStr := `SELECT log_cids.index, log_cids.address, COALESCE(log_cids.topic0, '') as topic0, COALESCE(log_cids.topic1, '') as topic1, data FROM eth.log_cids
INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
@@ -338,7 +328,7 @@ func TestPGXIndexer(t *testing.T) {

for idx, c := range rcts {
result := make([]models.IPLDModel, 0)
-pgStr = `SELECT data
+pgStr = `SELECT COALESCE(data, '') as data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
WHERE receipt_cids.leaf_cid = $1`
@@ -525,7 +515,7 @@ func TestPGXIndexer(t *testing.T) {
defer checkTxClosure(t, 1, 0, 1)
// check that storage nodes were properly indexed
storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
@@ -559,7 +549,7 @@ func TestPGXIndexer(t *testing.T) {

// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
8 changes: 4 additions & 4 deletions statediff/indexer/database/sql/sqlx_indexer_test.go
@@ -253,7 +253,7 @@ func TestSQLXIndexer(t *testing.T) {
AND transaction_cids.header_id = header_cids.block_hash
AND header_cids.block_number = $1
ORDER BY transaction_cids.index`
-logsPgStr := `SELECT log_cids.index, log_cids.address, log_cids.topic0, log_cids.topic1, data FROM eth.log_cids
+logsPgStr := `SELECT log_cids.index, log_cids.address, COALESCE(log_cids.topic0, '') as topic0, COALESCE(log_cids.topic1, '') as topic1, data FROM eth.log_cids
INNER JOIN eth.receipt_cids ON (log_cids.rct_id = receipt_cids.tx_id)
INNER JOIN public.blocks ON (log_cids.leaf_mh_key = blocks.key)
WHERE receipt_cids.leaf_cid = $1 ORDER BY eth.log_cids.index ASC`
@@ -322,7 +322,7 @@ func TestSQLXIndexer(t *testing.T) {

for idx, c := range rcts {
result := make([]models.IPLDModel, 0)
-pgStr = `SELECT data
+pgStr = `SELECT COALESCE(data, '') as data
FROM eth.receipt_cids
INNER JOIN public.blocks ON (receipt_cids.leaf_mh_key = public.blocks.key)
WHERE receipt_cids.leaf_cid = $1`
@@ -508,7 +508,7 @@ func TestSQLXIndexer(t *testing.T) {
defer checkTxClosure(t, 0, 0, 0)
// check that storage nodes were properly indexed
storageNodes := make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr := `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
@@ -542,7 +542,7 @@ func TestSQLXIndexer(t *testing.T) {

// check that Removed storage nodes were properly indexed
storageNodes = make([]models.StorageNodeWithStateKeyModel, 0)
-pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, storage_cids.storage_path
+pgStr = `SELECT cast(storage_cids.block_number AS TEXT), storage_cids.cid, state_cids.state_leaf_key, storage_cids.storage_leaf_key, storage_cids.node_type, COALESCE(storage_cids.storage_path, '') as storage_path
FROM eth.storage_cids, eth.state_cids, eth.header_cids
WHERE (storage_cids.state_path, storage_cids.header_id) = (state_cids.state_path, state_cids.header_id)
AND state_cids.header_id = header_cids.block_hash
