From c9bc603bae6f22d1850e04d37383c7b68082e787 Mon Sep 17 00:00:00 2001 From: Bruce Riley Date: Fri, 6 Sep 2024 10:48:27 -0500 Subject: [PATCH 1/7] Node/EVM: Batch poller publish initial blocks --- node/pkg/watchers/evm/connectors/batch_poller.go | 11 +++++++++++ node/pkg/watchers/evm/connectors/batch_poller_test.go | 5 +++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/node/pkg/watchers/evm/connectors/batch_poller.go b/node/pkg/watchers/evm/connectors/batch_poller.go index adabc05d26..1262350cac 100644 --- a/node/pkg/watchers/evm/connectors/batch_poller.go +++ b/node/pkg/watchers/evm/connectors/batch_poller.go @@ -82,6 +82,17 @@ func (b *BatchPollConnector) SubscribeForBlocks(ctx context.Context, errC chan e errCount := 0 + // Publish the initial finalized and safe blocks so we have a starting point for reobservation requests. + for idx, block := range lastBlocks { + b.logger.Info(fmt.Sprintf("publishing initial %s block", b.batchData[idx].finality), zap.Uint64("initial_block", block.Number.Uint64())) + sink <- block + if b.generateSafe && b.batchData[idx].finality == Finalized { + safe := block.Copy(Safe) + b.logger.Info("publishing generated initial safe block", zap.Uint64("initial_block", safe.Number.Uint64())) + sink <- safe + } + } + common.RunWithScissors(ctx, errC, "block_poll_subscribe_for_blocks", func(ctx context.Context) error { timer := time.NewTimer(b.Delay) defer timer.Stop() diff --git a/node/pkg/watchers/evm/connectors/batch_poller_test.go b/node/pkg/watchers/evm/connectors/batch_poller_test.go index 8f39495fd3..8cf7052290 100644 --- a/node/pkg/watchers/evm/connectors/batch_poller_test.go +++ b/node/pkg/watchers/evm/connectors/batch_poller_test.go @@ -281,12 +281,13 @@ func TestBatchPoller(t *testing.T) { } }() - // First sleep a bit and make sure there were no start up errors and no blocks got published. + // First sleep a bit and make sure there were no start up errors and the initial blocks were published. time.Sleep(10 * time.Millisecond) mutex.Lock() require.NoError(t, publishedErr) require.NoError(t, publishedSubErr) - assert.Nil(t, block) + batchShouldHaveSafeAndFinalizedButNotLatest(t, block, 0x309a0c, baseConnector.expectedHash()) + block = nil mutex.Unlock() // Post the first new block and verify we get it. 
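The patch above seeds each new subscriber with the poller's last known finalized block (and a generated safe block when the chain does not report one natively) before the polling loop starts, so reobservation requests have a starting height even if no new block arrives for a while. Below is a minimal, self-contained Go sketch of that publish-on-subscribe pattern; the Block and Finality types, their field names, and publishInitial are illustrative assumptions, not the connector's actual API.

package main

import "fmt"

// Finality is a stand-in for a block finality level (illustrative only).
type Finality string

const (
	Finalized Finality = "finalized"
	Safe      Finality = "safe"
)

// Block is a stand-in for the connector's block type (illustrative only).
type Block struct {
	Number   uint64
	Finality Finality
}

// publishInitial pushes the last known blocks into the sink before polling
// starts, deriving a Safe block from the Finalized one when asked to.
func publishInitial(lastBlocks []Block, generateSafe bool, sink chan<- Block) {
	for _, b := range lastBlocks {
		sink <- b
		if generateSafe && b.Finality == Finalized {
			sink <- Block{Number: b.Number, Finality: Safe}
		}
	}
}

func main() {
	sink := make(chan Block, 4)
	publishInitial([]Block{{Number: 0x309a0c, Finality: Finalized}}, true, sink)
	close(sink)
	for b := range sink {
		fmt.Printf("initial %s block %#x\n", b.Finality, b.Number)
	}
}

The updated test reflects the same behavior: instead of asserting that nothing is published at startup, it now expects the initial safe and finalized blocks (but no latest block) to arrive before the first polled block.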
From 68a7a1ede7b6387e91bce75812e787c50bd77959 Mon Sep 17 00:00:00 2001 From: Evan Gray Date: Mon, 9 Sep 2024 10:27:58 -0400 Subject: [PATCH 2/7] clients/js: switch eth rpc --- clients/js/src/consts/networks.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/js/src/consts/networks.ts b/clients/js/src/consts/networks.ts index 3bf6f7b70c..63c3b729bc 100644 --- a/clients/js/src/consts/networks.ts +++ b/clients/js/src/consts/networks.ts @@ -26,7 +26,7 @@ const Mainnet = { key: getEnvVar("TERRA_MNEMONIC"), }, Ethereum: { - rpc: `https://rpc.ankr.com/eth`, + rpc: `https://ethereum-rpc.publicnode.com`, key: getEnvVar("ETH_KEY"), chain_id: 1, }, From 2a22603d5a812dd72fee6d4110547e9da7eade8f Mon Sep 17 00:00:00 2001 From: Bruce Riley Date: Mon, 9 Sep 2024 12:41:40 -0500 Subject: [PATCH 3/7] Node/CCQ/EVM: Logging changes --- node/pkg/watchers/evm/ccq.go | 76 ++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/node/pkg/watchers/evm/ccq.go b/node/pkg/watchers/evm/ccq.go index 8faa5a4516..9d9e5eb5d1 100644 --- a/node/pkg/watchers/evm/ccq.go +++ b/node/pkg/watchers/evm/ccq.go @@ -143,7 +143,7 @@ func (w *Watcher) ccqHandleEthCallQueryRequest(ctx context.Context, queryRequest // Verify that the block read was successful. if err := w.ccqVerifyBlockResult(blockError, blockResult); err != nil { - w.ccqLogger.Debug("failed to verify block for eth_call query", + w.ccqLogger.Error("failed to verify block for eth_call query", zap.String("requestId", requestId), zap.String("block", block), zap.Any("batch", batch), @@ -153,19 +153,10 @@ func (w *Watcher) ccqHandleEthCallQueryRequest(ctx context.Context, queryRequest return } - w.ccqLogger.Info("query complete for eth_call", - zap.String("requestId", requestId), - zap.String("block", block), - zap.String("blockNumber", blockResult.Number.String()), - zap.String("blockHash", blockResult.Hash.Hex()), - zap.String("blockTime", blockResult.Time.String()), - zap.Int64("duration", time.Since(start).Milliseconds()), - ) - // Verify all the call results and build the batch of results. results, err := w.ccqVerifyAndExtractQueryResults(requestId, evmCallData) if err != nil { - w.ccqLogger.Debug("failed to process eth_call query call request", + w.ccqLogger.Error("failed to process eth_call query call request", zap.String("requestId", requestId), zap.String("block", block), zap.Any("batch", batch), @@ -175,6 +166,15 @@ func (w *Watcher) ccqHandleEthCallQueryRequest(ctx context.Context, queryRequest return } + w.ccqLogger.Info("query complete for eth_call", + zap.String("requestId", requestId), + zap.String("block", block), + zap.String("blockNumber", blockResult.Number.String()), + zap.String("blockHash", blockResult.Hash.Hex()), + zap.String("blockTime", blockResult.Time.String()), + zap.Int64("duration", time.Since(start).Milliseconds()), + ) + // Finally, build the response and publish it. resp := query.EthCallQueryResponse{ BlockNumber: blockResult.Number.ToInt().Uint64(), @@ -388,7 +388,7 @@ func (w *Watcher) ccqHandleEthCallByTimestampQueryRequest(ctx context.Context, q // Verify the following block read was successful. 
if err := w.ccqVerifyBlockResult(nextBlockError, nextBlockResult); err != nil { - w.ccqLogger.Debug("failed to verify next block for eth_call_by_timestamp query", + w.ccqLogger.Error("failed to verify next block for eth_call_by_timestamp query", zap.String("requestId", requestId), zap.String("block", block), zap.String("nextBlock", nextBlock), @@ -446,6 +446,20 @@ func (w *Watcher) ccqHandleEthCallByTimestampQueryRequest(ctx context.Context, q return } + // Verify all the call results and build the batch of results. + results, err := w.ccqVerifyAndExtractQueryResults(requestId, evmCallData) + if err != nil { + w.ccqLogger.Error("failed to process eth_call_by_timestamp query call request", + zap.String("requestId", requestId), + zap.String("block", block), + zap.String("nextBlock", nextBlock), + zap.Any("batch", batch), + zap.Error(err), + ) + w.ccqSendQueryResponse(queryRequest, query.QueryRetryNeeded, nil) + return + } + w.ccqLogger.Info("query complete for eth_call_by_timestamp", zap.String("requestId", requestId), zap.Uint64("desiredTimestamp", req.TargetTimestamp), @@ -460,20 +474,6 @@ func (w *Watcher) ccqHandleEthCallByTimestampQueryRequest(ctx context.Context, q zap.Int64("duration", time.Since(start).Milliseconds()), ) - // Verify all the call results and build the batch of results. - results, err := w.ccqVerifyAndExtractQueryResults(requestId, evmCallData) - if err != nil { - w.ccqLogger.Debug("failed to process eth_call_by_timestamp query call request", - zap.String("requestId", requestId), - zap.String("block", block), - zap.String("nextBlock", nextBlock), - zap.Any("batch", batch), - zap.Error(err), - ) - w.ccqSendQueryResponse(queryRequest, query.QueryRetryNeeded, nil) - return - } - // Finally, build the response and publish it. resp := query.EthCallByTimestampQueryResponse{ TargetBlockNumber: targetBlockNum, @@ -557,7 +557,7 @@ func (w *Watcher) ccqHandleEthCallWithFinalityQueryRequest(ctx context.Context, // Verify that the block read was successful. if err := w.ccqVerifyBlockResult(blockError, blockResult); err != nil { - w.ccqLogger.Debug("failed to verify block for eth_call_with_finality query", + w.ccqLogger.Error("failed to verify block for eth_call_with_finality query", zap.String("requestId", requestId), zap.String("block", block), zap.Any("batch", batch), @@ -590,20 +590,10 @@ func (w *Watcher) ccqHandleEthCallWithFinalityQueryRequest(ctx context.Context, return } - w.ccqLogger.Info("query complete for eth_call_with_finality", - zap.String("requestId", requestId), - zap.String("finality", req.Finality), - zap.Uint64("requestedBlockNumber", blockNumber), - zap.Uint64("latestBlockNumber", latestBlockNum), - zap.String("blockHash", blockResult.Hash.Hex()), - zap.String("blockTime", blockResult.Time.String()), - zap.Int64("duration", time.Since(start).Milliseconds()), - ) - // Verify all the call results and build the batch of results. 
results, err := w.ccqVerifyAndExtractQueryResults(requestId, evmCallData) if err != nil { - w.ccqLogger.Debug("failed to process eth_call_with_finality query call request", + w.ccqLogger.Error("failed to process eth_call_with_finality query call request", zap.String("requestId", requestId), zap.String("finality", req.Finality), zap.Uint64("requestedBlockNumber", blockNumber), @@ -616,6 +606,16 @@ func (w *Watcher) ccqHandleEthCallWithFinalityQueryRequest(ctx context.Context, return } + w.ccqLogger.Info("query complete for eth_call_with_finality", + zap.String("requestId", requestId), + zap.String("finality", req.Finality), + zap.Uint64("requestedBlockNumber", blockNumber), + zap.Uint64("latestBlockNumber", latestBlockNum), + zap.String("blockHash", blockResult.Hash.Hex()), + zap.String("blockTime", blockResult.Time.String()), + zap.Int64("duration", time.Since(start).Milliseconds()), + ) + // Finally, build the response and publish it. resp := query.EthCallWithFinalityQueryResponse{ BlockNumber: blockNumber, From 6b810acbecf67e1bb8a663db97dc352e13589529 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maxwell=20=22=EA=93=98=22=20Dulin?= Date: Tue, 10 Sep 2024 08:58:02 -0700 Subject: [PATCH 4/7] Lower governor limits of chains based upon usage (#4102) * Lower governor limits of chains based upon usage * Change Polygon and Avalanche back to 5M * Fix Governor tests that broke with 0 value in dailyLimit --------- Co-authored-by: Maxwell Dulin Co-authored-by: Maxwell Dulin --- node/pkg/governor/mainnet_chains.go | 22 +++++++++++----------- node/pkg/governor/mainnet_chains_test.go | 16 +++++++++++++--- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/node/pkg/governor/mainnet_chains.go b/node/pkg/governor/mainnet_chains.go index e6e4dc597c..c2dce4defe 100644 --- a/node/pkg/governor/mainnet_chains.go +++ b/node/pkg/governor/mainnet_chains.go @@ -12,33 +12,33 @@ func chainList() []chainConfigEntry { return []chainConfigEntry{ {emitterChainID: vaa.ChainIDSolana, dailyLimit: 25_000_000, bigTransactionSize: 2_500_000}, {emitterChainID: vaa.ChainIDEthereum, dailyLimit: 50_000_000, bigTransactionSize: 5_000_000}, - {emitterChainID: vaa.ChainIDTerra, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDTerra, dailyLimit: 150_000, bigTransactionSize: 15_000}, {emitterChainID: vaa.ChainIDBSC, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, {emitterChainID: vaa.ChainIDPolygon, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, {emitterChainID: vaa.ChainIDAvalanche, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, - {emitterChainID: vaa.ChainIDOasis, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDOasis, dailyLimit: 250_000, bigTransactionSize: 25_000}, {emitterChainID: vaa.ChainIDAlgorand, dailyLimit: 1_000_000, bigTransactionSize: 100_000}, - {emitterChainID: vaa.ChainIDAurora, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDAurora, dailyLimit: 0, bigTransactionSize: 0}, {emitterChainID: vaa.ChainIDFantom, dailyLimit: 500_000, bigTransactionSize: 50_000}, - {emitterChainID: vaa.ChainIDKarura, dailyLimit: 500_000, bigTransactionSize: 50_000}, - {emitterChainID: vaa.ChainIDAcala, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDKarura, dailyLimit: 150_000, bigTransactionSize: 15_000}, + {emitterChainID: vaa.ChainIDAcala, dailyLimit: 100_000, bigTransactionSize: 10_000}, {emitterChainID: vaa.ChainIDKlaytn, dailyLimit: 500_000, bigTransactionSize: 50_000}, 
{emitterChainID: vaa.ChainIDCelo, dailyLimit: 2_000_000, bigTransactionSize: 200_000}, - {emitterChainID: vaa.ChainIDNear, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDNear, dailyLimit: 150_000, bigTransactionSize: 15_000}, {emitterChainID: vaa.ChainIDMoonbeam, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, - {emitterChainID: vaa.ChainIDTerra2, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDTerra2, dailyLimit: 100_000, bigTransactionSize: 10_000}, {emitterChainID: vaa.ChainIDInjective, dailyLimit: 500_000, bigTransactionSize: 50_000}, {emitterChainID: vaa.ChainIDSui, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, {emitterChainID: vaa.ChainIDAptos, dailyLimit: 1_000_000, bigTransactionSize: 100_000}, {emitterChainID: vaa.ChainIDArbitrum, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, {emitterChainID: vaa.ChainIDOptimism, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, - {emitterChainID: vaa.ChainIDXpla, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDXpla, dailyLimit: 50_000, bigTransactionSize: 5_000}, {emitterChainID: vaa.ChainIDBase, dailyLimit: 2_000_000, bigTransactionSize: 200_000}, {emitterChainID: vaa.ChainIDSei, dailyLimit: 5_000_000, bigTransactionSize: 500_000}, {emitterChainID: vaa.ChainIDScroll, dailyLimit: 500_000, bigTransactionSize: 50_000}, - {emitterChainID: vaa.ChainIDMantle, dailyLimit: 500_000, bigTransactionSize: 50_000}, - {emitterChainID: vaa.ChainIDBlast, dailyLimit: 500_000, bigTransactionSize: 50_000}, - {emitterChainID: vaa.ChainIDXLayer, dailyLimit: 500_000, bigTransactionSize: 50_000}, + {emitterChainID: vaa.ChainIDMantle, dailyLimit: 100_000, bigTransactionSize: 10_000}, + {emitterChainID: vaa.ChainIDBlast, dailyLimit: 100_000, bigTransactionSize: 10_000}, + {emitterChainID: vaa.ChainIDXLayer, dailyLimit: 100_000, bigTransactionSize: 10_000}, {emitterChainID: vaa.ChainIDWormchain, dailyLimit: 500_000, bigTransactionSize: 50_000}, {emitterChainID: vaa.ChainIDSnaxchain, dailyLimit: 500_000, bigTransactionSize: 50_000}, } diff --git a/node/pkg/governor/mainnet_chains_test.go b/node/pkg/governor/mainnet_chains_test.go index 71e5c8aeb0..5607c8a9c8 100644 --- a/node/pkg/governor/mainnet_chains_test.go +++ b/node/pkg/governor/mainnet_chains_test.go @@ -20,8 +20,11 @@ func TestChainListSize(t *testing.T) { func TestChainDailyLimitRange(t *testing.T) { chainConfigEntries := chainList() - /* This IS a hard limit, if daily limit is set to zero it would - basically mean no value movement is allowed for that chain*/ + /* + If a chain is deprecated, we want to make sure its still governed + in the case that it is used. This will effectively stall all + transfers for 24 hours on a deprecated chain. 
+ */ min_daily_limit := uint64(0) /* This IS NOT a hard limit, we can adjust it up as we see fit, @@ -36,7 +39,7 @@ func TestChainDailyLimitRange(t *testing.T) { /* Assuming that a governed chains should always be more than zero and less than 50,000,001 */ for _, chainConfigEntry := range chainConfigEntries { t.Run(chainConfigEntry.emitterChainID.String(), func(t *testing.T) { - assert.Greater(t, chainConfigEntry.dailyLimit, min_daily_limit) + assert.GreaterOrEqual(t, chainConfigEntry.dailyLimit, min_daily_limit) assert.Less(t, chainConfigEntry.dailyLimit, max_daily_limit) }) } @@ -62,6 +65,13 @@ func TestChainListBigTransfers(t *testing.T) { chainConfigEntries := chainList() for _, e := range chainConfigEntries { + + // If the daily limit is 0 then both the big TX and daily limit should be 0. + if e.dailyLimit == 0 { + assert.Equal(t, e.bigTransactionSize, e.dailyLimit) + continue + } + // it's always ideal to have bigTransactionSize be less than dailyLimit assert.Less(t, e.bigTransactionSize, e.dailyLimit) From 3652be89c92b920ba427d2592587e695691e8f72 Mon Sep 17 00:00:00 2001 From: Joel Smith Date: Mon, 16 Sep 2024 11:41:45 -0500 Subject: [PATCH 5/7] Fix incorrectly merged db_test.go --- node/pkg/db/db_test.go | 368 +++++++++++++++++++++++++++++------------ 1 file changed, 264 insertions(+), 104 deletions(-) diff --git a/node/pkg/db/db_test.go b/node/pkg/db/db_test.go index e58f7dfc04..543d1b6cd6 100644 --- a/node/pkg/db/db_test.go +++ b/node/pkg/db/db_test.go @@ -1,130 +1,290 @@ -package processor +package db import ( - "encoding/hex" - "time" - - "github.com/mr-tron/base58" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + "bytes" + "crypto/ecdsa" + "crypto/rand" + "fmt" + math_rand "math/rand" + "os" + "runtime" + "sync" + "sync/atomic" - ethCommon "github.com/ethereum/go-ethereum/common" + "github.com/dgraph-io/badger/v3" "github.com/ethereum/go-ethereum/crypto" + "github.com/wormhole-foundation/wormhole/sdk/vaa" "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "github.com/certusone/wormhole/node/pkg/common" - "github.com/wormhole-foundation/wormhole/sdk/vaa" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -var ( - // SECURITY: source_chain/target_chain are untrusted uint8 values. An attacker could cause a maximum of 255**2 label - // pairs to be created, which is acceptable. 
+func getVAA() vaa.VAA { + return getVAAWithSeqNum(1) +} - messagesObservedTotal = promauto.NewCounterVec( - prometheus.CounterOpts{ - Name: "wormhole_message_observations_total", - Help: "Total number of messages observed", - }, - []string{"emitter_chain"}) -) +func getVAAWithSeqNum(seqNum uint64) vaa.VAA { + var payload = []byte{97, 97, 97, 97, 97, 97} + var governanceEmitter = vaa.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4} + + return vaa.VAA{ + Version: uint8(1), + GuardianSetIndex: uint32(1), + Signatures: nil, + Timestamp: time.Unix(0, 0), + Nonce: uint32(1), + Sequence: seqNum, + ConsistencyLevel: uint8(32), + EmitterChain: vaa.ChainIDSolana, + EmitterAddress: governanceEmitter, + Payload: payload, + } +} + +// Testing the expected default behavior of a CreateGovernanceVAA +func TestVaaIDFromString(t *testing.T) { + vaaIdString := "1/0000000000000000000000000000000000000000000000000000000000000004/1" + vaaID, _ := VaaIDFromString(vaaIdString) + expectAddr := vaa.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4} + + assert.Equal(t, vaa.ChainIDSolana, vaaID.EmitterChain) + assert.Equal(t, expectAddr, vaaID.EmitterAddress) + assert.Equal(t, uint64(1), vaaID.Sequence) +} + +func TestVaaIDFromVAA(t *testing.T) { + testVaa := getVAA() + vaaID := VaaIDFromVAA(&testVaa) + expectAddr := vaa.Address{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4} + + assert.Equal(t, vaa.ChainIDSolana, vaaID.EmitterChain) + assert.Equal(t, expectAddr, vaaID.EmitterAddress) + assert.Equal(t, uint64(1), vaaID.Sequence) +} + +func TestBytes(t *testing.T) { + vaaIdString := "1/0000000000000000000000000000000000000000000000000000000000000004/1" + vaaID, _ := VaaIDFromString(vaaIdString) + expected := []byte{0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x2f, 0x31, 0x2f, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x34, 0x2f, 0x31} + + assert.Equal(t, expected, vaaID.Bytes()) +} + +func TestEmitterPrefixBytesWithChainIDAndAddress(t *testing.T) { + vaaIdString := "1/0000000000000000000000000000000000000000000000000000000000000004/1" + vaaID, _ := VaaIDFromString(vaaIdString) + expected := []byte{0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x2f, 0x31, 0x2f, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x34} + + assert.Equal(t, expected, vaaID.EmitterPrefixBytes()) +} + +func TestEmitterPrefixBytesWithOnlyChainID(t *testing.T) { + vaaID := VAAID{EmitterChain: vaa.ChainID(26)} + assert.Equal(t, []byte("signed/26"), vaaID.EmitterPrefixBytes()) +} + +func TestStoreSignedVAAUnsigned(t *testing.T) { + dbPath := t.TempDir() + db := OpenDb(zap.NewNop(), &dbPath) + defer db.Close() + defer os.Remove(dbPath) + + testVaa := getVAA() + + // Should panic because the VAA is not signed + assert.Panics(t, func() { 
db.StoreSignedVAA(&testVaa) }, "The code did not panic") //nolint:errcheck +} + +func TestStoreSignedVAASigned(t *testing.T) { + dbPath := t.TempDir() + db := OpenDb(zap.NewNop(), &dbPath) + defer db.Close() + defer os.Remove(dbPath) + + testVaa := getVAA() + + privKey, _ := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + testVaa.AddSignature(privKey, 0) + + err2 := db.StoreSignedVAA(&testVaa) + assert.NoError(t, err2) +} + +func TestStoreSignedVAABatch(t *testing.T) { + dbPath := t.TempDir() + db := OpenDb(zap.NewNop(), &dbPath) + defer db.Close() + defer os.Remove(dbPath) -// handleMessage processes a message received from a chain and instantiates our deterministic copy of the VAA. An -// event may be received multiple times and must be handled in an idempotent fashion. -func (p *Processor) handleMessage(k *common.MessagePublication) { - if p.gs == nil { - p.logger.Warn("dropping observation since we haven't initialized our guardian set yet", - zap.String("message_id", k.MessageIDString()), - zap.Uint32("nonce", k.Nonce), - zap.Stringer("txhash", k.TxHash), - zap.Time("timestamp", k.Timestamp), - ) - return + privKey, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + require.NoError(t, err) + + require.Less(t, int64(0), db.db.MaxBatchCount()) // In testing this was 104857. + require.Less(t, int64(0), db.db.MaxBatchSize()) // In testing this was 10066329. + + // Make sure we exceed the max batch size. + numVAAs := uint64(db.db.MaxBatchCount() + 1) + + // Build the VAA batch. + vaaBatch := make([]*vaa.VAA, 0, numVAAs) + for seqNum := uint64(0); seqNum < numVAAs; seqNum++ { + v := getVAAWithSeqNum(seqNum) + v.AddSignature(privKey, 0) + vaaBatch = append(vaaBatch, &v) } - messagesObservedTotal.WithLabelValues(k.EmitterChain.String()).Inc() - - // All nodes will create the exact same VAA and sign its digest. - // Consensus is established on this digest. - - v := &VAA{ - VAA: vaa.VAA{ - Version: vaa.SupportedVAAVersion, - GuardianSetIndex: p.gs.Index, - Signatures: nil, - Timestamp: k.Timestamp, - Nonce: k.Nonce, - EmitterChain: k.EmitterChain, - EmitterAddress: k.EmitterAddress, - Payload: k.Payload, - Sequence: k.Sequence, - ConsistencyLevel: k.ConsistencyLevel, - }, - Unreliable: k.Unreliable, - Reobservation: k.IsReobservation, + // Store the batch in the database. + err = db.StoreSignedVAABatch(vaaBatch) + require.NoError(t, err) + + // Verify all the VAAs are in the database. + for _, v := range vaaBatch { + storedBytes, err := db.GetSignedVAABytes(*VaaIDFromVAA(v)) + require.NoError(t, err) + + origBytes, err := v.Marshal() + require.NoError(t, err) + + assert.True(t, bytes.Equal(origBytes, storedBytes)) + } + + // Verify that updates work as well by tweaking the VAAs and rewriting them. + for _, v := range vaaBatch { + v.Nonce += 1 } - // Generate digest of the unsigned VAA. - digest := v.SigningDigest() - hash := hex.EncodeToString(digest.Bytes()) + // Store the updated batch in the database. + err = db.StoreSignedVAABatch(vaaBatch) + require.NoError(t, err) - // Sign the digest using our node's guardian key. - signature, err := crypto.Sign(digest.Bytes(), p.gk) - if err != nil { - panic(err) + // Verify all the updated VAAs are in the database. 
+ for _, v := range vaaBatch { + storedBytes, err := db.GetSignedVAABytes(*VaaIDFromVAA(v)) + require.NoError(t, err) + + origBytes, err := v.Marshal() + require.NoError(t, err) + + assert.True(t, bytes.Equal(origBytes, storedBytes)) + } +} + +func TestGetSignedVAABytes(t *testing.T) { + dbPath := t.TempDir() + db := OpenDb(zap.NewNop(), &dbPath) + defer db.Close() + defer os.Remove(dbPath) + + testVaa := getVAA() + + vaaID := VaaIDFromVAA(&testVaa) + + privKey, _ := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + testVaa.AddSignature(privKey, 0) + + // Store full VAA + err2 := db.StoreSignedVAA(&testVaa) + assert.NoError(t, err2) + + // Retrieve it using vaaID + vaaBytes, err2 := db.GetSignedVAABytes(*vaaID) + assert.NoError(t, err2) + + testVaaBytes, err3 := testVaa.Marshal() + assert.NoError(t, err3) + + assert.Equal(t, testVaaBytes, vaaBytes) +} + +func TestFindEmitterSequenceGap(t *testing.T) { + dbPath := t.TempDir() + db := OpenDb(zap.NewNop(), &dbPath) + defer db.Close() + defer os.Remove(dbPath) + + testVaa := getVAA() + + vaaID := VaaIDFromVAA(&testVaa) + + privKey, _ := ecdsa.GenerateKey(crypto.S256(), rand.Reader) + testVaa.AddSignature(privKey, 0) + + // Store full VAA + err2 := db.StoreSignedVAA(&testVaa) + assert.NoError(t, err2) + + resp, firstSeq, lastSeq, err := db.FindEmitterSequenceGap(*vaaID) + + assert.Equal(t, []uint64{0x0}, resp) + assert.Equal(t, uint64(0x0), firstSeq) + assert.Equal(t, uint64(0x1), lastSeq) + assert.NoError(t, err) +} + +// BenchmarkVaaLookup benchmarks db.GetSignedVAABytes +// You need to set the environment variable WH_DBPATH to a path with a populated BadgerDB. +// You may want to play with the CONCURRENCY parameter. +func BenchmarkVaaLookup(b *testing.B) { + CONCURRENCY := runtime.NumCPU() + dbPath := os.Getenv("WH_DBPATH") + require.NotEqual(b, dbPath, "") + + // open DB + optionsDB := badger.DefaultOptions(dbPath) + optionsDB.Logger = nil + badgerDb, err := badger.Open(optionsDB) + require.NoError(b, err) + db := &Database{ + db: badgerDb, } - shouldPublishImmediately := p.shouldPublishImmediately(&v.VAA) - - if p.logger.Core().Enabled(zapcore.DebugLevel) { - p.logger.Debug("observed and signed confirmed message publication", - zap.String("message_id", k.MessageIDString()), - zap.Stringer("txhash", k.TxHash), - zap.String("txhash_b58", base58.Encode(k.TxHash.Bytes())), - zap.String("hash", hash), - zap.Uint32("nonce", k.Nonce), - zap.Time("timestamp", k.Timestamp), - zap.Uint8("consistency_level", k.ConsistencyLevel), - zap.String("signature", hex.EncodeToString(signature)), - zap.Bool("shouldPublishImmediately", shouldPublishImmediately), - zap.Bool("isReobservation", k.IsReobservation), - ) + if err != nil { + b.Error("failed to open database") } + defer db.Close() + + vaaIds := make(chan *VAAID, b.N) - // Broadcast the signature. - ourObs, msg := p.broadcastSignature(v.MessageID(), k.TxHash.Bytes(), digest, signature, shouldPublishImmediately) + for i := 0; i < b.N; i++ { + randId := math_rand.Intn(250000) //nolint + randId = 250000 - (i / 18) + vaaId, err := VaaIDFromString(fmt.Sprintf("4/000000000000000000000000b6f6d86a8f9879a9c87f643768d9efc38c1da6e7/%d", randId)) + assert.NoError(b, err) + vaaIds <- vaaId + } - // Indicate that we observed this one. - observationsReceivedTotal.Inc() - observationsReceivedByGuardianAddressTotal.WithLabelValues(p.ourAddr.Hex()).Inc() + b.ResetTimer() - // Get / create our state entry. 
- s := p.state.signatures[hash] - if s == nil { - s = &state{ - firstObserved: time.Now(), - nextRetry: time.Now().Add(nextRetryDuration(0)), - signatures: map[ethCommon.Address][]byte{}, - source: "loopback", - } + // actual timed code + var errCtr atomic.Int32 + var wg sync.WaitGroup - p.state.signatures[hash] = s + for i := 0; i < CONCURRENCY; i++ { + wg.Add(1) + go func() { + for { + select { + case vaaId := <-vaaIds: + _, err = db.GetSignedVAABytes(*vaaId) + if err != nil { + fmt.Printf("error retrieving %s/%s/%d: %s\n", vaaId.EmitterChain, vaaId.EmitterAddress, vaaId.Sequence, err) + errCtr.Add(1) + } + default: + wg.Done() + return + } + } + }() } - // Update our state. - s.ourObservation = v - s.txHash = k.TxHash.Bytes() - s.source = v.GetEmitterChain().String() - s.gs = p.gs // guaranteed to match ourObservation - there's no concurrent access to p.gs - s.signatures[p.ourAddr] = signature - s.ourObs = ourObs - s.ourMsg = msg - - // Fast path for our own signature. - if !s.submitted { - start := time.Now() - p.checkForQuorum(ourObs, s, s.gs, hash) - timeToHandleObservation.Observe(float64(time.Since(start).Microseconds())) + wg.Wait() + + if int(errCtr.Load()) > b.N/3 { + b.Error("More than 1/3 of GetSignedVAABytes failed.") } } From 91ec4d1dc01f8b690f0492815407505fb4587520 Mon Sep 17 00:00:00 2001 From: Sebastian Banescu Date: Mon, 16 Sep 2024 17:27:46 +0200 Subject: [PATCH 6/7] Update bug bounty reward limit in SECURITY.md The max reward on immunefi is $5M --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index 2bb4fe6f8e..937e5d144e 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -54,7 +54,7 @@ The Wormhole project operates a bug bounty program to financially incentivize in - [Immunefi-Hosted Program](https://immunefi.com/bounty/wormhole/) - **Scopes**: Guardian and Smart Contracts - - **Rewards**: Up to $2,500,000 USDC + - **Rewards**: Up to $5,000,000 USDC - **KYC**: Required If you find a security issue in Wormhole, please report the issue immediately using the bug bounty program above. 
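For context on the StoreSignedVAABatch test in the db_test.go patch above: the test intentionally builds more VAAs than the database's MaxBatchCount, so a single Badger transaction cannot hold them all and the writes have to be committed in pieces (Badger's write-batch machinery normally takes care of this). The sketch below is a minimal, self-contained illustration of that chunked-write idea; storeBatch, maxBatchCount, and flush are assumed names for the purpose of the example, not the actual Database API.

package main

import "fmt"

// storeBatch writes items in chunks of at most maxBatchCount, committing each
// chunk via flush, so inputs larger than a single batch still succeed.
func storeBatch(items []string, maxBatchCount int, flush func([]string) error) error {
	for start := 0; start < len(items); start += maxBatchCount {
		end := start + maxBatchCount
		if end > len(items) {
			end = len(items)
		}
		if err := flush(items[start:end]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	commits := 0
	_ = storeBatch(make([]string, 10), 3, func(chunk []string) error {
		commits++
		return nil
	})
	fmt.Println("commits:", commits) // 4 commits: chunks of 3, 3, 3 and 1
}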
From 8e21c71695dbe1d50cf730fe4618c006c4f895ce Mon Sep 17 00:00:00 2001 From: Kaku <105181329+kakucodes@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:14:34 -0500 Subject: [PATCH 7/7] cosmwasm: add wormchain ibc receiver tests (#81) * cosmwasm: added rust tests to wormchain-ibc-receiver * update tests to use the execute method instead of the parse_vaa fn directly * fixing clippy errors * update spellcheck/dictionary based on joel's fix * fixed spelling errors in 2 files --- cosmwasm/Cargo.lock | 3 + .../wormchain-ibc-receiver/Cargo.toml | 6 + .../wormchain-ibc-receiver/src/lib.rs | 3 + .../src/tests/integration_tests.rs | 362 ++++++++++++++++++ .../wormchain-ibc-receiver/src/tests/mod.rs | 2 + .../src/tests/test_utils.rs | 66 ++++ cspell-custom-words.txt | 56 +-- ...ing-to-latest-cosmos-sdk-upstream-v0-45.md | 2 +- 8 files changed, 474 insertions(+), 26 deletions(-) create mode 100644 cosmwasm/contracts/wormchain-ibc-receiver/src/tests/integration_tests.rs create mode 100644 cosmwasm/contracts/wormchain-ibc-receiver/src/tests/mod.rs create mode 100644 cosmwasm/contracts/wormchain-ibc-receiver/src/tests/test_utils.rs diff --git a/cosmwasm/Cargo.lock b/cosmwasm/Cargo.lock index 2c3fc20235..1a4f2f1567 100644 --- a/cosmwasm/Cargo.lock +++ b/cosmwasm/Cargo.lock @@ -2597,8 +2597,11 @@ dependencies = [ "anyhow", "cosmwasm-schema", "cosmwasm-std", + "cw-multi-test", "cw-storage-plus 0.13.4", "semver", + "serde", + "serde-json-wasm 0.4.1", "serde_wormhole", "thiserror", "wormhole-bindings", diff --git a/cosmwasm/contracts/wormchain-ibc-receiver/Cargo.toml b/cosmwasm/contracts/wormchain-ibc-receiver/Cargo.toml index 778fe83edb..58c6be3054 100644 --- a/cosmwasm/contracts/wormchain-ibc-receiver/Cargo.toml +++ b/cosmwasm/contracts/wormchain-ibc-receiver/Cargo.toml @@ -21,3 +21,9 @@ thiserror = "1.0.31" wormhole-bindings = "0.1.0" wormhole-sdk = { workspace = true, features = ["schemars"] } serde_wormhole.workspace = true + +[dev-dependencies] +cw-multi-test = "0.13.2" +serde-json-wasm = "0.4" +wormhole-bindings = { version = "0.1.0", features=["fake"] } +serde = { version = "1.0.137", default-features = false, features = ["derive"] } \ No newline at end of file diff --git a/cosmwasm/contracts/wormchain-ibc-receiver/src/lib.rs b/cosmwasm/contracts/wormchain-ibc-receiver/src/lib.rs index 10d266d8f4..31d3febfdc 100644 --- a/cosmwasm/contracts/wormchain-ibc-receiver/src/lib.rs +++ b/cosmwasm/contracts/wormchain-ibc-receiver/src/lib.rs @@ -3,3 +3,6 @@ pub mod error; pub mod ibc; pub mod msg; pub mod state; + +#[cfg(test)] +pub mod tests; diff --git a/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/integration_tests.rs b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/integration_tests.rs new file mode 100644 index 0000000000..86210bfe20 --- /dev/null +++ b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/integration_tests.rs @@ -0,0 +1,362 @@ +use crate::{ + contract::{execute, query}, + msg::{AllChannelChainsResponse, ExecuteMsg, QueryMsg}, + tests::test_utils::{create_gov_vaa_body, create_transfer_vaa_body, sign_vaa_body}, +}; +use anyhow::Error; +use cosmwasm_std::{ + from_binary, + testing::{mock_env, mock_info, MockApi, MockQuerier, MockStorage}, + to_binary, Binary, ContractResult, Deps, DepsMut, Empty, QuerierWrapper, SystemResult, +}; +use wormhole_bindings::{fake::WormholeKeeper, WormholeQuery}; +use wormhole_sdk::{ + ibc_receiver::{Action, GovernancePacket}, + vaa::Body, + Chain, GOVERNANCE_EMITTER, +}; + +#[test] +pub fn add_channel_chain_happy_path() -> anyhow::Result<(), Error> { 
+ let wh = WormholeKeeper::new(); + + let querier: MockQuerier = + MockQuerier::new(&[]).with_custom_handler(|q| match q { + WormholeQuery::VerifyVaa { vaa } => { + match WormholeKeeper::new().verify_vaa(&vaa.0, 0u64) { + Ok(_) => SystemResult::Ok(if let Ok(data) = to_binary(&Empty {}) { + ContractResult::Ok(data) + } else { + ContractResult::Err("Unable to convert to binary".to_string()) + }), + Err(e) => SystemResult::Ok(ContractResult::Err(e.to_string())), + } + } + _ => cosmwasm_std::SystemResult::Ok(cosmwasm_std::ContractResult::Ok( + to_binary(&Empty {}).unwrap(), + )), + }); + + let mut mut_deps = DepsMut { + storage: &mut MockStorage::default(), + api: &MockApi::default(), + querier: QuerierWrapper::new(&querier), + }; + let info = mock_info("sender", &[]); + let env = mock_env(); + + let add_sei_channel_body = create_gov_vaa_body(1, Chain::Sei, *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-0"); + let (_, add_sei_vaa_binary) = sign_vaa_body(wh.clone(), add_sei_channel_body); + + let submissions = execute( + mut_deps.branch(), + env.clone(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_sei_vaa_binary], + }, + ); + + assert!( + submissions.is_ok(), + "A proper UpdateChannelChain gov vaa should be accepted" + ); + + // create a readonly deps to use for querying the state + let empty_mock_querier = MockQuerier::::new(&[]); + let readonly_deps = Deps { + storage: mut_deps.storage, + api: mut_deps.api, + querier: QuerierWrapper::new(&empty_mock_querier), + }; + + let channel_binary = query(readonly_deps, env, QueryMsg::AllChannelChains {})?; + let channel: AllChannelChainsResponse = from_binary(&channel_binary)?; + + assert_eq!(channel.channels_chains.len(), 1); + let channel_entry = channel.channels_chains.first().unwrap(); + assert_eq!( + channel_entry.0, + Binary::from(*b"channel-0"), + "the stored channel for sei should initially be channel-0" + ); + assert_eq!( + channel_entry.1, + Into::::into(Chain::Sei), + "the stored channel should be for sei's chain id" + ); + + Ok(()) +} + +#[test] +pub fn add_channel_chain_happy_path_multiple() -> anyhow::Result<(), Error> { + let wh = WormholeKeeper::new(); + + let querier: MockQuerier = + MockQuerier::new(&[]).with_custom_handler(|q| match q { + WormholeQuery::VerifyVaa { vaa } => { + match WormholeKeeper::new().verify_vaa(&vaa.0, 0u64) { + Ok(_) => SystemResult::Ok(if let Ok(data) = to_binary(&Empty {}) { + ContractResult::Ok(data) + } else { + ContractResult::Err("Unable to convert to binary".to_string()) + }), + Err(e) => SystemResult::Ok(ContractResult::Err(e.to_string())), + } + } + _ => cosmwasm_std::SystemResult::Ok(cosmwasm_std::ContractResult::Ok( + to_binary(&Empty {}).unwrap(), + )), + }); + + let mut mut_deps = DepsMut { + storage: &mut MockStorage::default(), + api: &MockApi::default(), + querier: QuerierWrapper::new(&querier), + }; + let info = mock_info("sender", &[]); + + let add_inj_channel_body = create_gov_vaa_body(2, Chain::Injective, *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-1"); + let (_, add_inj_vaa_bin) = sign_vaa_body(wh.clone(), add_inj_channel_body); + let add_sei_channel_body = create_gov_vaa_body(3, Chain::Sei, 
*b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-2"); + let (_, add_sei_vaa_binary) = sign_vaa_body(wh.clone(), add_sei_channel_body); + + // add a channel for injective and update the channel set for sei + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_sei_vaa_binary, add_inj_vaa_bin], + }, + ); + + assert!( + submissions.is_ok(), + "A pair of proper UpdateChannelChain gov vaas should be accepted" + ); + + // create a readonly deps to use for querying the state + let empty_mock_querier = MockQuerier::::new(&[]); + let readonly_deps = Deps { + storage: mut_deps.storage, + api: mut_deps.api, + querier: QuerierWrapper::new(&empty_mock_querier), + }; + + // refetch all the channels that are in state + let channel_binary = query(readonly_deps, mock_env(), QueryMsg::AllChannelChains {})?; + let AllChannelChainsResponse { + channels_chains: mut channels, + }: AllChannelChainsResponse = from_binary(&channel_binary)?; + + channels.sort_by(|(_, a_chain_id), (_, b_chain_id)| a_chain_id.cmp(b_chain_id)); + + assert_eq!(channels.len(), 2); + + let channel_entry = channels.first().unwrap(); + assert_eq!( + channel_entry.0, + Binary::from(*b"channel-1"), + "the stored channel should be channel-1 " + ); + assert_eq!( + channel_entry.1, + Into::::into(Chain::Injective), + "the stored channel should be for injective's chain id" + ); + + let channel_entry = channels.last().unwrap(); + assert_eq!( + channel_entry.0, + Binary::from(*b"channel-2"), + "the stored channel should be channel-2" + ); + assert_eq!( + channel_entry.1, + Into::::into(Chain::Sei), + "the stored channel should be for sei's chain id" + ); + + Ok(()) +} + +#[test] +pub fn reject_invalid_add_channel_chain_vaas() { + let wh = WormholeKeeper::new(); + + let querier: MockQuerier = + MockQuerier::new(&[]).with_custom_handler(|q| match q { + WormholeQuery::VerifyVaa { vaa } => { + match WormholeKeeper::new().verify_vaa(&vaa.0, 0u64) { + Ok(_) => SystemResult::Ok(if let Ok(data) = to_binary(&Empty {}) { + ContractResult::Ok(data) + } else { + ContractResult::Err("Unable to convert to binary".to_string()) + }), + Err(e) => SystemResult::Ok(ContractResult::Err(e.to_string())), + } + } + _ => cosmwasm_std::SystemResult::Ok(cosmwasm_std::ContractResult::Ok( + to_binary(&Empty {}).unwrap(), + )), + }); + + let mut mut_deps = DepsMut { + storage: &mut MockStorage::default(), + api: &MockApi::default(), + querier: QuerierWrapper::new(&querier), + }; + let info = mock_info("sender", &[]); + + let add_channel_body = create_gov_vaa_body(1, Chain::Wormchain, *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-0"); + let (_, add_vaa_binary) = sign_vaa_body(wh.clone(), add_channel_body); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary], + }, + ); + + assert!( + submissions.is_err(), + "Cannot add a channel from Gateway to Gateway" + ); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![Binary::from(vec![0u8; 32])], + }, + ); + + 
assert!( + submissions.is_err(), + "VAA should be rejected if it cannot be parsed because it's too short" + ); + + let add_channel_body = create_transfer_vaa_body(1); + let (_, add_vaa_binary) = sign_vaa_body(wh.clone(), add_channel_body); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary], + }, + ); + + assert!(submissions.is_err(), "Can only execute governance vaas"); + + let add_channel_body = create_gov_vaa_body(1, Chain::Osmosis, *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-0"); + let (_, add_vaa_binary) = sign_vaa_body(wh.clone(), add_channel_body); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary], + }, + ); + + assert!( + submissions.is_ok(), + "Can add a channel from Osmosis to Gateway" + ); + + let add_channel_body: Body = Body { + timestamp: 1u32, + nonce: 1u32, + emitter_chain: Chain::Solana, + emitter_address: GOVERNANCE_EMITTER, + sequence: 1u64, + consistency_level: 0, + payload: GovernancePacket { + chain: Chain::Osmosis, + action: Action::UpdateChannelChain { + channel_id: *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-0", + chain_id: Chain::CosmosHub, + }, + }, + }; + let (_, add_vaa_binary) = sign_vaa_body(wh.clone(), add_channel_body); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary], + }, + ); + + assert!( + submissions.is_err(), + "Cannot add a update a chain besides Gateway" + ); +} + +#[test] +pub fn reject_replayed_add_channel_chain_vaas() { + let wh = WormholeKeeper::new(); + + let querier: MockQuerier = + MockQuerier::new(&[]).with_custom_handler(|q| match q { + WormholeQuery::VerifyVaa { vaa } => { + match WormholeKeeper::new().verify_vaa(&vaa.0, 0u64) { + Ok(_) => SystemResult::Ok(if let Ok(data) = to_binary(&Empty {}) { + ContractResult::Ok(data) + } else { + ContractResult::Err("Unable to convert to binary".to_string()) + }), + Err(e) => SystemResult::Ok(ContractResult::Err(e.to_string())), + } + } + _ => cosmwasm_std::SystemResult::Ok(cosmwasm_std::ContractResult::Ok( + to_binary(&Empty {}).unwrap(), + )), + }); + + let mut mut_deps = DepsMut { + storage: &mut MockStorage::default(), + api: &MockApi::default(), + querier: QuerierWrapper::new(&querier), + }; + let info = mock_info("sender", &[]); + + let add_channel_body = create_gov_vaa_body(1, Chain::Osmosis, *b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00channel-0"); + let (_, add_vaa_binary) = sign_vaa_body(wh.clone(), add_channel_body); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary.clone()], + }, + ); + + assert!( + submissions.is_ok(), + "Can add a channel from Osmosis to Gateway" + ); + + let submissions = execute( + mut_deps.branch(), + mock_env(), + 
info.clone(), + ExecuteMsg::SubmitUpdateChannelChain { + vaas: vec![add_vaa_binary], + }, + ); + + assert!(submissions.is_err(), "Cannot replay the same VAA"); +} diff --git a/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/mod.rs b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/mod.rs new file mode 100644 index 0000000000..7e5f6c1060 --- /dev/null +++ b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/mod.rs @@ -0,0 +1,2 @@ +pub mod integration_tests; +pub mod test_utils; diff --git a/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/test_utils.rs b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/test_utils.rs new file mode 100644 index 0000000000..702d83bf7c --- /dev/null +++ b/cosmwasm/contracts/wormchain-ibc-receiver/src/tests/test_utils.rs @@ -0,0 +1,66 @@ +use cosmwasm_std::{Binary, Uint256}; +use serde::Serialize; +use wormhole_bindings::fake::WormholeKeeper; +use wormhole_sdk::{ + ibc_receiver::{Action, GovernancePacket}, + token::Message, + vaa::{Body, Header, Vaa}, + Address, Amount, Chain, GOVERNANCE_EMITTER, +}; + +pub fn create_transfer_vaa_body(i: usize) -> Body { + Body { + timestamp: i as u32, + nonce: i as u32, + emitter_chain: (i as u16).into(), + emitter_address: Address([(i as u8); 32]), + sequence: i as u64, + consistency_level: 32, + payload: Message::Transfer { + amount: Amount(Uint256::from(i as u128).to_be_bytes()), + token_address: Address([(i + 1) as u8; 32]), + token_chain: (i as u16).into(), + recipient: Address([i as u8; 32]), + recipient_chain: ((i + 2) as u16).into(), + fee: Amount([0u8; 32]), + }, + } +} + +pub fn create_gov_vaa_body( + i: usize, + chain_id: Chain, + channel_id: [u8; 64], +) -> Body { + Body { + timestamp: i as u32, + nonce: i as u32, + emitter_chain: Chain::Solana, + emitter_address: GOVERNANCE_EMITTER, + sequence: i as u64, + consistency_level: 0, + payload: GovernancePacket { + chain: Chain::Wormchain, + action: Action::UpdateChannelChain { + channel_id, + chain_id, + }, + }, + } +} + +pub fn sign_vaa_body(wh: WormholeKeeper, body: Body
<P>) -> (Vaa<P>
, Binary) { + let data = serde_wormhole::to_vec(&body).unwrap(); + let signatures = WormholeKeeper::new().sign(&data); + + let header = Header { + version: 1, + guardian_set_index: wh.guardian_set_index(), + signatures, + }; + + let v = (header, body).into(); + let data = serde_wormhole::to_vec(&v).map(From::from).unwrap(); + + (v, data) +} diff --git a/cspell-custom-words.txt b/cspell-custom-words.txt index 1cb8060b6f..c8410ef40a 100644 --- a/cspell-custom-words.txt +++ b/cspell-custom-words.txt @@ -1,5 +1,5 @@ -acala Acala +acala Acks Alertmanager algod @@ -12,8 +12,8 @@ authorisation authorise authorised backdoors -bech Bech +bech behaviour Berachain bigset @@ -25,8 +25,8 @@ bytecodes callstack ccqlistener CCTP -celestia Celestia +celestia celo certusone Chainlink @@ -37,8 +37,8 @@ Concat conftest Cosm cosmoshub -cosmwasm Cosmwasm +cosmwasm counterparty cpus crosschain @@ -46,8 +46,8 @@ Cyfrin datagram denoms devnet -dymension Dymension +dymension ethcrypto ethersproject ETHRPC @@ -59,16 +59,16 @@ funder gogoproto goimports gossipv -guardiand GUARDIAND +guardiand guardiand's Hacken hashdump -healthcheck Healthcheck +healthcheck hexdump -holesky Holesky +holesky horcrux ICCO incentivized @@ -76,28 +76,28 @@ incentivizing initialisation initialised initialiser -injective Injective +injective inotify intcblock -ints Ints +ints IPFS journalctl -karura Karura +karura Keccak -kevm KEVM +kevm keymap keytool -klaytn Klaytn +klaytn kompiled kompiles Kudelski -kujira Kujira +kujira lamports lastrun libp @@ -113,11 +113,12 @@ moonscan moretags Neodyme nhooyr -obsv +Nygard Obsv +obsv optimisation -optin Optin +optin parachain pdas permissioned @@ -128,16 +129,17 @@ Polkadot Positionals prefunded promauto -proto Proto +proto protobuf protos prototxt +Pryce's pubkey pushbytes pushint -pytest Pytest +pytest pythnet QUIC ramfs @@ -155,26 +157,29 @@ seievm Sepolia serde setcap -snaxchain SnaxChain -solana +snaxchain Solana +solana Solana's spydk Starport statesync +Strangelove struct structs subdenom -subdenoms Subdenoms +subdenoms +submessage supermajority superminority -tendermint Tendermint +tendermint terrad tokenbridge tokenfactory +toolset trustlessly tsig tsproto @@ -196,14 +201,15 @@ vimdiff vphash wasmhooks wasms +wasmvm WORKDIR -wormchain Wormchain +wormchain wormchaind Wormholescan wormscan wormscanurl xlayer -xpla XPLA -Zellic +xpla +Zellic \ No newline at end of file diff --git a/wormchain/docs/architecture/0004-tendermint-core-will-be-migrated-to-cometbft-by-bumping-to-latest-cosmos-sdk-upstream-v0-45.md b/wormchain/docs/architecture/0004-tendermint-core-will-be-migrated-to-cometbft-by-bumping-to-latest-cosmos-sdk-upstream-v0-45.md index 7f7f432665..90db868eb6 100644 --- a/wormchain/docs/architecture/0004-tendermint-core-will-be-migrated-to-cometbft-by-bumping-to-latest-cosmos-sdk-upstream-v0-45.md +++ b/wormchain/docs/architecture/0004-tendermint-core-will-be-migrated-to-cometbft-by-bumping-to-latest-cosmos-sdk-upstream-v0-45.md @@ -14,7 +14,7 @@ This version of the Cosmos SDK was released before the fork of Tendermint Core a To facilitate a more modern usage of the Cosmos SDK, projects should move away from Tendermint Core to CometBFT, as it is more up-to-date, maintained and provides security and bug fixes. -The Cosmos SDK team slowly rolled out migrations from Tendermint Core to CometBFT in the Comsos SDK repo, and this migration was implemented in the v0.45 line in release [v0.45.15](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.45.15). 
+The Cosmos SDK team slowly rolled out migrations from Tendermint Core to CometBFT in the Cosmos SDK repo, and this migration was implemented in the v0.45 line in release [v0.45.15](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.45.15). ## Decision