diff --git a/ante/sendblock_test.go b/ante/sendblock_test.go
index 38ab9d57100..59818a5be2c 100644
--- a/ante/sendblock_test.go
+++ b/ante/sendblock_test.go
@@ -14,19 +14,21 @@ func TestSendBlockDecorator(t *testing.T) {
 		to         sdk.AccAddress
 		expectPass bool
 	}{
-		{sdk.AccAddress("honest-sender"), sdk.AccAddress("honest-address"), true},
-		{sdk.AccAddress("honest-sender"), sdk.AccAddress("recovery-address"), true},
-		{sdk.AccAddress("malicious-sender"), sdk.AccAddress("recovery-address"), true},
-		{sdk.AccAddress("malicious-sender"), sdk.AccAddress("random-address"), false},
+		{sdk.AccAddress("honest-sender_______"), sdk.AccAddress("honest-address"), true},
+		{sdk.AccAddress("honest-sender_______"), sdk.AccAddress("recovery-address"), true},
+		{sdk.AccAddress("malicious-sender____"), sdk.AccAddress("recovery-address"), true},
+		{sdk.AccAddress("malicious-sender____"), sdk.AccAddress("random-address"), false},
 	}

 	permittedOnlySendTo := map[string]string{
-		sdk.AccAddress("malicious-sender").String(): sdk.AccAddress("recovery-address").String(),
+		sdk.AccAddress("malicious-sender____").String(): sdk.AccAddress("recovery-address").String(),
 	}
 	decorator := NewSendBlockDecorator(SendBlockOptions{permittedOnlySendTo})
 	for _, testCase := range testCases {
-		err := decorator.CheckIfBlocked([]sdk.Msg{bank.NewMsgSend(testCase.from, testCase.to, sdk.NewCoins(sdk.NewInt64Coin("test", 1)))})
+		err := decorator.CheckIfBlocked(
+			[]sdk.Msg{
+				bank.NewMsgSend(testCase.from, testCase.to, sdk.NewCoins(sdk.NewInt64Coin("test", 1)))})
 		if testCase.expectPass {
 			require.NoError(t, err)
 		} else {
diff --git a/app/ante.go b/app/ante.go
index 0705af4d3b2..ba999fc802a 100644
--- a/app/ante.go
+++ b/app/ante.go
@@ -12,8 +12,8 @@ import (
 	"github.com/cosmos/cosmos-sdk/x/auth/signing"

 	osmoante "github.com/osmosis-labs/osmosis/v7/ante"
+	v9 "github.com/osmosis-labs/osmosis/v7/app/upgrades/v9"
-	v8 "github.com/osmosis-labs/osmosis/v7/app/upgrades/v8"
 	txfeeskeeper "github.com/osmosis-labs/osmosis/v7/x/txfees/keeper"
 	txfeestypes "github.com/osmosis-labs/osmosis/v7/x/txfees/types"
 )
@@ -42,7 +42,7 @@ func NewAnteHandler(
 		wasmkeeper.NewLimitSimulationGasDecorator(wasmConfig.SimulationGasLimit),
 		wasmkeeper.NewCountTXDecorator(txCounterStoreKey),
 		ante.NewRejectExtensionOptionsDecorator(),
-		v8.MsgFilterDecorator{},
+		v9.MsgFilterDecorator{},
 		// Use Mempool Fee Decorator from our txfees module instead of default one from auth
 		// https://github.com/cosmos/cosmos-sdk/blob/master/x/auth/middleware/fee.go#L34
 		mempoolFeeDecorator,
diff --git a/app/upgrades/v9/msg_filter_ante.go b/app/upgrades/v9/msg_filter_ante.go
new file mode 100644
index 00000000000..b6035b35976
--- /dev/null
+++ b/app/upgrades/v9/msg_filter_ante.go
@@ -0,0 +1,35 @@
+package v9
+
+import (
+	"fmt"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	ibcchanneltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types"
+)
+
+// MsgFilterDecorator defines an AnteHandler decorator for the v9 upgrade that
+// provides message filtering, rejecting txs that contain blocked message types.
+type MsgFilterDecorator struct{}
+
+// AnteHandle performs an AnteHandler check that returns an error if the tx contains a message
+// that is blocked.
+// Right now, we block MsgTimeoutOnClose due to incorrect behavior that could occur if a packet is re-enabled.
+func (mfd MsgFilterDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) {
+	if hasInvalidMsgs(tx.GetMsgs()) {
+		currHeight := ctx.BlockHeight()
+		return ctx, fmt.Errorf("tx contains unsupported message types at height %d", currHeight)
+	}
+
+	return next(ctx, tx, simulate)
+}
+
+func hasInvalidMsgs(msgs []sdk.Msg) bool {
+	for _, msg := range msgs {
+		switch msg.(type) {
+		case *ibcchanneltypes.MsgTimeoutOnClose:
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/app/upgrades/v9/msg_filter_ante_test.go b/app/upgrades/v9/msg_filter_ante_test.go
new file mode 100644
index 00000000000..0a3d9b5fee5
--- /dev/null
+++ b/app/upgrades/v9/msg_filter_ante_test.go
@@ -0,0 +1,70 @@
+package v9_test
+
+import (
+	"testing"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+	"github.com/stretchr/testify/require"
+
+	ibcchanneltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types"
+
+	"github.com/osmosis-labs/osmosis/v7/app"
+	v8 "github.com/osmosis-labs/osmosis/v7/app/upgrades/v8"
+	v9 "github.com/osmosis-labs/osmosis/v7/app/upgrades/v9"
+)
+
+func noOpAnteDecorator() sdk.AnteHandler {
+	return func(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) {
+		return ctx, nil
+	}
+}
+
+func TestMsgFilterDecorator(t *testing.T) {
+	handler := v9.MsgFilterDecorator{}
+	txCfg := app.MakeEncodingConfig().TxConfig
+
+	addr1 := sdk.AccAddress([]byte("addr1_______________"))
+	addr2 := sdk.AccAddress([]byte("addr2_______________"))
+
+	testCases := []struct {
+		name      string
+		ctx       sdk.Context
+		msgs      []sdk.Msg
+		expectErr bool
+	}{
+		{
+			name: "valid tx",
+			ctx:  sdk.Context{}.WithBlockHeight(v8.UpgradeHeight - 1),
+			msgs: []sdk.Msg{
+				banktypes.NewMsgSend(addr1, addr2, sdk.NewCoins(sdk.NewInt64Coin("foo", 5))),
+			},
+			expectErr: false,
+		},
+		{
+			name: "invalid tx",
+			ctx:  sdk.Context{}.WithBlockHeight(v8.UpgradeHeight),
+			msgs: []sdk.Msg{
+				banktypes.NewMsgSend(addr1, addr2, sdk.NewCoins(sdk.NewInt64Coin("foo", 5))),
+				&ibcchanneltypes.MsgTimeoutOnClose{},
+			},
+			expectErr: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+
+		t.Run(tc.name, func(t *testing.T) {
+			txBuilder := txCfg.NewTxBuilder()
+			require.NoError(t, txBuilder.SetMsgs(tc.msgs...))
+
+			_, err := handler.AnteHandle(tc.ctx, txBuilder.GetTx(), false, noOpAnteDecorator())
+			if tc.expectErr {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
diff --git a/tests/e2e/docker/image_config.go b/tests/e2e/docker/image_config.go
new file mode 100644
index 00000000000..db072e5e694
--- /dev/null
+++ b/tests/e2e/docker/image_config.go
@@ -0,0 +1,64 @@
+package docker
+
+// ImageConfig contains all images and their respective tags
+// needed for running e2e tests.
+type ImageConfig struct {
+	InitRepository string
+	InitTag        string
+
+	OsmosisRepository string
+	OsmosisTag        string
+
+	RelayerRepository string
+	RelayerTag        string
+}
+
+const (
+	// Local osmosis repo/version.
+	// It is used when skipping the upgrade (by setting OSMOSIS_E2E_SKIP_UPGRADE to true).
+	// This image should be pre-built with `make docker-build-debug` either in CI or locally.
+	LocalOsmoRepository = "osmosis"
+	LocalOsmoTag        = "debug"
+	// Local osmosis repo/version for osmosis initialization.
+	// It is used when skipping the upgrade (by setting OSMOSIS_E2E_SKIP_UPGRADE to true).
+	// This image should be pre-built with `make docker-build-e2e-chain-init` either in CI or locally.
+	localInitRepository = "osmosis-e2e-chain-init"
+	localInitTag        = "debug"
+	// Pre-upgrade osmosis repo/tag to pull.
+	// It should be uploaded to Docker Hub. OSMOSIS_E2E_SKIP_UPGRADE should be unset
+	// for this functionality to be used.
+	previousVersionOsmoRepository = "osmolabs/osmosis-dev"
+	previousVersionOsmoTag        = "v8.0.0-debug"
+	// Pre-upgrade repo/tag for osmosis initialization (this should be one version below upgradeVersion).
+	previousVersionInitRepository = "osmolabs/osmosis-init"
+	previousVersionInitTag        = "v8.0.0"
+	// Hermes repo/version for the relayer.
+	relayerRepository = "osmolabs/hermes"
+	relayerTag        = "0.13.0"
+)
+
+// NewImageConfig returns the ImageConfig needed for running e2e tests.
+// If isUpgrade is true, it returns images for running the upgrade;
+// otherwise, it returns images for running non-upgrade e2e tests.
+func NewImageConfig(isUpgrade bool) *ImageConfig {
+	config := &ImageConfig{
+		RelayerRepository: relayerRepository,
+		RelayerTag:        relayerTag,
+	}
+
+	if isUpgrade {
+		config.InitRepository = previousVersionInitRepository
+		config.InitTag = previousVersionInitTag
+
+		config.OsmosisRepository = previousVersionOsmoRepository
+		config.OsmosisTag = previousVersionOsmoTag
+	} else {
+		config.InitRepository = localInitRepository
+		config.InitTag = localInitTag
+
+		config.OsmosisRepository = LocalOsmoRepository
+		config.OsmosisTag = LocalOsmoTag
+	}
+
+	return config
+}
diff --git a/tests/e2e/e2e_setup_test.go b/tests/e2e/e2e_setup_test.go
index 399bfdc3e32..d5fe898766e 100644
--- a/tests/e2e/e2e_setup_test.go
+++ b/tests/e2e/e2e_setup_test.go
@@ -21,25 +21,33 @@ import (
 	rpchttp "github.com/tendermint/tendermint/rpc/client/http"

 	"github.com/osmosis-labs/osmosis/v7/tests/e2e/chain"
+	dockerconfig "github.com/osmosis-labs/osmosis/v7/tests/e2e/docker"
 	"github.com/osmosis-labs/osmosis/v7/tests/e2e/util"
 )

-var (
+type status struct {
+	LatestHeight string `json:"latest_block_height"`
+}
+
+type syncInfo struct {
+	SyncInfo status `json:"SyncInfo"`
+}
+
+type chainConfig struct {
+	// votingPeriod is the estimated number of blocks it takes to deposit for a proposal,
+	// plus 1.2 blocks per validator to vote on the prop, plus a buffer.
+	votingPeriod float32
+	// upgrade proposal height for the chain.
+	propHeight int
+	// Indexes of validators to skip running during initialization.
+	// This is needed for testing functionality like state-sync, where we would
+	// like to start a node during tests, post-initialization.
+	skipRunValidatorIndexes map[int]struct{}
+	chain                   *chain.Chain
+}
+
+const (
 	// osmosis version being upgraded to (folder must exist here https://github.com/osmosis-labs/osmosis/tree/main/app/upgrades)
 	upgradeVersion = "v9"
-	// osmosis repo/version for initialization (this should be one version below upgradeVersion)
-	initRepository = "osmolabs/osmosis-init"
-	initVersion    = "v8.0.0"
-	// pre upgrade osmosis repo/version to pull (should match initVersion numer)
-	debugRepository = "osmolabs/osmosis-dev"
-	debugVersion    = "v8.0.0-debug"
-	// hermes repo/version for relayer
-	relayerRepository = "osmolabs/hermes"
-	relayerVersion    = "0.13.0"
-	// voting period for chain A
-	votingPeriodA float32
-	// voting period for chain B
-	votingPeriodB float32
 	// estimated number of blocks it takes to submit for a proposal
 	propSubmitBlocks float32 = 10
 	// estimated number of blocks it takes to deposit for a proposal
@@ -48,14 +56,11 @@
 	propVoteBlocks float32 = 1.2
 	// number of blocks used as a calculation buffer
 	propBufferBlocks float32 = 5
-	// variable used to switch between chain A and B prop height in for loop
-	propHeight int
-	// upgrade proposal height for chain A
-	propHeightA int
-	// upgrade proposal height for chain B
-	propHeightB int
 	// max retries for json unmarshalling
 	maxRetries = 60
+)
+
+var (
 	// the number of validator configs listed here determines how many validators spawn on chain A and B respectively
 	validatorConfigsChainA = []*chain.ValidatorConfig{
 		{
@@ -79,6 +84,13 @@
 			SnapshotInterval:   1500,
 			SnapshotKeepRecent: 2,
 		},
+		{
+			Pruning:            "everything",
+			PruningKeepRecent:  "0",
+			PruningInterval:    "0",
+			SnapshotInterval:   0,
+			SnapshotKeepRecent: 0,
+		},
 	}
 	validatorConfigsChainB = []*chain.ValidatorConfig{
 		{
@@ -109,20 +121,13 @@ type IntegrationTestSuite struct {
 	suite.Suite

 	tmpDirs        []string
-	chains         []*chain.Chain
+	chainConfigs   []*chainConfig
 	dkrPool        *dockertest.Pool
 	dkrNet         *dockertest.Network
 	hermesResource *dockertest.Resource
 	initResource   *dockertest.Resource
 	valResources   map[string][]*dockertest.Resource
-}
-
-type status struct {
-	LatestHeight string `json:"latest_block_height"`
-}
-
-type syncInfo struct {
-	SyncInfo status `json:"SyncInfo"`
+	dockerImages   dockerconfig.ImageConfig
 }

 func TestIntegrationTestSuite(t *testing.T) {
@@ -132,7 +137,7 @@
 func (s *IntegrationTestSuite) SetupSuite() {
 	s.T().Log("setting up e2e integration test suite...")

-	s.chains = make([]*chain.Chain, 0, 2)
+	s.chainConfigs = make([]*chainConfig, 0, 2)

 	// The e2e test flow is as follows:
 	//
 	// 1. Configure two chains - chain A and chain B.
 	//   * For each chain, set up two validators
 	//   * Initialize configs and genesis for all of them.
 	// 2. Start both networks.
 	// 3. Run IBC relayer between the two chains.
 	// 4. Execute various e2e tests, including IBC.
+	var (
+		skipUpgrade bool
+		err         error
+	)
+
 	if str := os.Getenv("OSMOSIS_E2E_SKIP_UPGRADE"); len(str) > 0 {
-		skipUpgrade, err := strconv.ParseBool(str)
+		skipUpgrade, err = strconv.ParseBool(str)
 		s.Require().NoError(err)
+	}

-		if skipUpgrade {
-			debugRepository = "osmosis"
-			debugVersion = "debug"
-			s.configureDockerResources(chain.ChainAID, chain.ChainBID)
-			s.configureChain(chain.ChainAID, validatorConfigsChainA)
-			s.configureChain(chain.ChainBID, validatorConfigsChainB)
-
-			s.runValidators(s.chains[0], 0)
-			s.runValidators(s.chains[1], 10)
-			s.runIBCRelayer()
-			s.runPostUpgradeTests()
-			return
+	s.dockerImages = *dockerconfig.NewImageConfig(!skipUpgrade)
+
+	s.configureDockerResources(chain.ChainAID, chain.ChainBID)
+	s.configureChain(chain.ChainAID, validatorConfigsChainA, map[int]struct{}{
+		3: {}, // skip validator at index 3
+	})
+	s.configureChain(chain.ChainBID, validatorConfigsChainB, map[int]struct{}{})
+
+	for i, chainConfig := range s.chainConfigs {
+		s.runValidators(chainConfig, s.dockerImages.OsmosisRepository, s.dockerImages.OsmosisTag, i*10)
+	}
+
+	// Run a relayer between every possible pair of chains.
+	for i := 0; i < len(s.chainConfigs); i++ {
+		for j := i + 1; j < len(s.chainConfigs); j++ {
+			s.runIBCRelayer(s.chainConfigs[i].chain, s.chainConfigs[j].chain)
 		}
 	}

-	s.configureDockerResources(chain.ChainAID, chain.ChainBID)
-	s.configureChain(chain.ChainAID, validatorConfigsChainA)
-	s.configureChain(chain.ChainBID, validatorConfigsChainB)
-
-	s.runValidators(s.chains[0], 0)
-	s.runValidators(s.chains[1], 10)
-	s.runIBCRelayer()
-	// pre upgrade state creation
-	s.createPreUpgradeState()
-	// initialize and run the upgrade
-	s.upgrade()
-	// post upgrade tests
-	s.runPostUpgradeTests()
+
+	if !skipUpgrade {
+		s.createPreUpgradeState()
+		s.upgrade()
+		s.runPostUpgradeTests()
+	}
 }

 func (s *IntegrationTestSuite) TearDownSuite() {
@@ -197,8 +205,8 @@

 	s.Require().NoError(s.dkrPool.RemoveNetwork(s.dkrNet))

-	for _, chain := range s.chains {
-		os.RemoveAll(chain.ChainMeta.DataDir)
+	for _, chainConfig := range s.chainConfigs {
+		os.RemoveAll(chainConfig.chain.ChainMeta.DataDir)
 	}

 	for _, td := range s.tmpDirs {
@@ -206,12 +214,21 @@
 	}
 }

-func (s *IntegrationTestSuite) runValidators(c *chain.Chain, portOffset int) {
-	s.T().Logf("starting %s validator containers...", c.ChainMeta.Id)
-	s.valResources[c.ChainMeta.Id] = make([]*dockertest.Resource, len(c.Validators))
+func (s *IntegrationTestSuite) runValidators(chainConfig *chainConfig, dockerRepository, dockerTag string, portOffset int) {
+	chain := chainConfig.chain
+	s.T().Logf("starting %s validator containers...", chain.ChainMeta.Id)
+	s.valResources[chain.ChainMeta.Id] = make([]*dockertest.Resource, len(chain.Validators)-len(chainConfig.skipRunValidatorIndexes))
 	pwd, err := os.Getwd()
 	s.Require().NoError(err)
-	for i, val := range c.Validators {
+	for i, val := range chain.Validators {
+		// Skip some validators from running during setup.
+		// This is needed for testing functionality like
+		// state-sync, where we might want to start some validators during tests.
+		if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
+			s.T().Logf("skipping %s validator with index %d from running...", val.Name, i)
+			continue
+		}
+
 		runOpts := &dockertest.RunOptions{
 			Name:      val.Name,
 			NetworkID: s.dkrNet.Network.ID,
@@ -219,8 +236,8 @@
 				fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
 				fmt.Sprintf("%s/scripts:/osmosis", pwd),
 			},
-			Repository: debugRepository,
-			Tag:        debugVersion,
+			Repository: dockerRepository,
+			Tag:        dockerTag,
 			Cmd: []string{
 				"start",
 			},
@@ -245,7 +262,7 @@
 		resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
 		s.Require().NoError(err)

-		s.valResources[c.ChainMeta.Id][i] = resource
+		s.valResources[chain.ChainMeta.Id][i] = resource
 		s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
 	}

@@ -275,15 +292,15 @@
 	)
 }

-func (s *IntegrationTestSuite) runIBCRelayer() {
+func (s *IntegrationTestSuite) runIBCRelayer(chainA *chain.Chain, chainB *chain.Chain) {
 	s.T().Log("starting Hermes relayer container...")

 	tmpDir, err := ioutil.TempDir("", "osmosis-e2e-testnet-hermes-")
 	s.Require().NoError(err)
 	s.tmpDirs = append(s.tmpDirs, tmpDir)

-	osmoAVal := s.chains[0].Validators[0]
-	osmoBVal := s.chains[1].Validators[0]
+	osmoAVal := chainA.Validators[0]
+	osmoBVal := chainB.Validators[0]
 	hermesCfgPath := path.Join(tmpDir, "hermes")

 	s.Require().NoError(os.MkdirAll(hermesCfgPath, 0o755))
@@ -295,9 +312,9 @@

 	s.hermesResource, err = s.dkrPool.RunWithOptions(
 		&dockertest.RunOptions{
-			Name:       fmt.Sprintf("%s-%s-relayer", s.chains[0].ChainMeta.Id, s.chains[1].ChainMeta.Id),
-			Repository: relayerRepository,
-			Tag:        relayerVersion,
+			Name:       fmt.Sprintf("%s-%s-relayer", chainA.ChainMeta.Id, chainB.ChainMeta.Id),
+			Repository: s.dockerImages.RelayerRepository,
+			Tag:        s.dockerImages.RelayerTag,
 			NetworkID:  s.dkrNet.Network.ID,
 			Cmd: []string{
 				"start",
@@ -313,12 +330,12 @@
 				"3031/tcp": {{HostIP: "", HostPort: "3031"}},
 			},
 			Env: []string{
-				fmt.Sprintf("OSMO_A_E2E_CHAIN_ID=%s", s.chains[0].ChainMeta.Id),
-				fmt.Sprintf("OSMO_B_E2E_CHAIN_ID=%s", s.chains[1].ChainMeta.Id),
+				fmt.Sprintf("OSMO_A_E2E_CHAIN_ID=%s", chainA.ChainMeta.Id),
+				fmt.Sprintf("OSMO_B_E2E_CHAIN_ID=%s", chainB.ChainMeta.Id),
 				fmt.Sprintf("OSMO_A_E2E_VAL_MNEMONIC=%s", osmoAVal.Mnemonic),
 				fmt.Sprintf("OSMO_B_E2E_VAL_MNEMONIC=%s", osmoBVal.Mnemonic),
-				fmt.Sprintf("OSMO_A_E2E_VAL_HOST=%s", s.valResources[s.chains[0].ChainMeta.Id][0].Container.Name[1:]),
-				fmt.Sprintf("OSMO_B_E2E_VAL_HOST=%s", s.valResources[s.chains[1].ChainMeta.Id][0].Container.Name[1:]),
+				fmt.Sprintf("OSMO_A_E2E_VAL_HOST=%s", s.valResources[chainA.ChainMeta.Id][0].Container.Name[1:]),
+				fmt.Sprintf("OSMO_B_E2E_VAL_HOST=%s", s.valResources[chainB.ChainMeta.Id][0].Container.Name[1:]),
 			},
 			Entrypoint: []string{
 				"sh",
@@ -367,41 +384,39 @@
 	time.Sleep(10 * time.Second)

 	// create the client, connection and channel between the two Osmosis chains
-	s.connectIBCChains()
+	s.connectIBCChains(chainA, chainB)
 }

-func (s *IntegrationTestSuite) configureChain(chainId string, validatorConfigs []*chain.ValidatorConfig) {
-
+func (s *IntegrationTestSuite) configureChain(chainId string, validatorConfigs []*chain.ValidatorConfig, skipValidatorIndexes map[int]struct{}) {
 	s.T().Logf("starting e2e infrastructure for chain-id: %s", chainId)
 	tmpDir, err := ioutil.TempDir("", "osmosis-e2e-testnet-")
 	s.T().Logf("temp directory for chain-id %v: %v", chainId, tmpDir)
 	s.Require().NoError(err)

-	b, err := json.Marshal(validatorConfigs)
+	validatorConfigBytes, err := json.Marshal(validatorConfigs)
 	s.Require().NoError(err)

 	numVal := float32(len(validatorConfigs))
-	// voting period is number of blocks it takes to deposit, 1.2 seconds per validator to vote on the prop, then a buffer
-	votingPeriodNum := propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks
-	if chainId == chain.ChainAID {
-		votingPeriodA = votingPeriodNum
-	} else if chainId == chain.ChainBID {
-		votingPeriodB = votingPeriodNum
+
+	newChainConfig := chainConfig{
+		votingPeriod:            propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks,
+		skipRunValidatorIndexes: skipValidatorIndexes,
 	}
-	votingPeriod := time.Duration(int(votingPeriodNum) * 1000000000)
+
+	votingPeriodDuration := time.Duration(int(newChainConfig.votingPeriod) * 1000000000)

 	s.initResource, err = s.dkrPool.RunWithOptions(
 		&dockertest.RunOptions{
 			Name:       fmt.Sprintf("%s", chainId),
-			Repository: initRepository,
-			Tag:        initVersion,
+			Repository: s.dockerImages.InitRepository,
+			Tag:        s.dockerImages.InitTag,
 			NetworkID:  s.dkrNet.Network.ID,
 			Cmd: []string{
 				fmt.Sprintf("--data-dir=%s", tmpDir),
 				fmt.Sprintf("--chain-id=%s", chainId),
-				fmt.Sprintf("--config=%s", b),
-				fmt.Sprintf("--voting-period=%v", votingPeriod),
+				fmt.Sprintf("--config=%s", validatorConfigBytes),
+				fmt.Sprintf("--voting-period=%v", votingPeriodDuration),
 			},
 			User: "root:root",
 			Mounts: []string{
@@ -412,8 +427,6 @@
 	)
 	s.Require().NoError(err)

-	var newChain chain.Chain
-
 	fileName := fmt.Sprintf("%v/%v-encode", tmpDir, chainId)
 	s.T().Logf("serialized init file for chain-id %v: %v", chainId, fileName)

 	// without this, test attempts to unmarshal file before docker container is finished writing
 	for i := 0; i < maxRetries; i++ {
 		encJson, _ := os.ReadFile(fileName)
-		err = json.Unmarshal(encJson, &newChain)
+		err = json.Unmarshal(encJson, &newChainConfig.chain)
 		if err == nil {
 			break
 		}
@@ -434,8 +447,9 @@
 			time.Sleep(1 * time.Second)
 		}
 	}
-	s.chains = append(s.chains, &newChain)
 	s.Require().NoError(s.dkrPool.Purge(s.initResource))
+
+	s.chainConfigs = append(s.chainConfigs, &newChainConfig)
 }

 func (s *IntegrationTestSuite) configureDockerResources(chainIDOne, chainIDTwo string) {
@@ -459,39 +473,36 @@ func noRestart(config *docker.HostConfig) {

 func (s *IntegrationTestSuite) upgrade() {
 	// submit, deposit, and vote for upgrade proposal
 	// prop height = current height + voting period + time it takes to submit proposal + small buffer
-	currentHeightA := s.getCurrentChainHeight(s.valResources[s.chains[0].ChainMeta.Id][0].Container.ID)
-	propHeightA = currentHeightA + int(votingPeriodA) + int(propSubmitBlocks) + int(propBufferBlocks)
-	s.submitProposal(s.chains[0], propHeightA)
-	s.depositProposal(s.chains[0])
-	s.voteProposal(s.chains[0])
-	// prop height = current height + voting period + time it takes to submit proposal + small buffer
-	currentHeightB := s.getCurrentChainHeight(s.valResources[s.chains[1].ChainMeta.Id][0].Container.ID)
-	propHeightB = currentHeightB + int(votingPeriodB) + int(propSubmitBlocks) + int(propBufferBlocks)
-	s.submitProposal(s.chains[1], propHeightB)
-	s.depositProposal(s.chains[1])
-	s.voteProposal(s.chains[1])
+	for _, chainConfig := range s.chainConfigs {
+		currentHeight := s.getCurrentChainHeight(s.valResources[chainConfig.chain.ChainMeta.Id][0].Container.ID)
+		chainConfig.propHeight = currentHeight + int(chainConfig.votingPeriod) + int(propSubmitBlocks) + int(propBufferBlocks)
+		s.submitProposal(chainConfig.chain, chainConfig.propHeight)
+		s.depositProposal(chainConfig.chain)
+		s.voteProposal(chainConfig)
+	}

 	// wait till all chains halt at upgrade height
-	for _, c := range s.chains {
-		if c.ChainMeta.Id == chain.ChainAID {
-			propHeight = propHeightA
-		} else {
-			propHeight = propHeightB
-		}
-		for i := range c.Validators {
+	for _, chainConfig := range s.chainConfigs {
+		curChain := chainConfig.chain
+
+		for i := range chainConfig.chain.Validators {
+			if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
+				continue
+			}
+
 			// use counter to ensure no new blocks are being created
 			counter := 0
 			s.T().Logf("waiting to reach upgrade height on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
 			s.Require().Eventually(
 				func() bool {
-					currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID)
-					if currentHeight != propHeight {
-						s.T().Logf("current block height on %s is %v, waiting for block %v container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, propHeight, s.valResources[c.ChainMeta.Id][i].Container.ID)
+					currentHeight := s.getCurrentChainHeight(s.valResources[curChain.ChainMeta.Id][i].Container.ID)
+					if currentHeight != chainConfig.propHeight {
+						s.T().Logf("current block height on %s is %v, waiting for block %v container: %s", s.valResources[curChain.ChainMeta.Id][i].Container.Name[1:], currentHeight, chainConfig.propHeight, s.valResources[curChain.ChainMeta.Id][i].Container.ID)
 					}
-					if currentHeight > propHeight {
+					if currentHeight > chainConfig.propHeight {
 						panic("chain did not halt at upgrade height")
 					}
-					if currentHeight == propHeight {
+					if currentHeight == chainConfig.propHeight {
 						counter++
 					}
 					return counter == 3
@@ -499,34 +510,48 @@
 				5*time.Minute,
 				time.Second,
 			)
-			s.T().Logf("reached upgrade height on %s container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
+			s.T().Logf("reached upgrade height on %s container: %s", s.valResources[curChain.ChainMeta.Id][i].Container.Name[1:], s.valResources[curChain.ChainMeta.Id][i].Container.ID)
 		}
 	}

 	// remove all containers so we can upgrade them to the new version
-	for _, chain := range s.chains {
-		for i := range chain.Validators {
+	for _, chainConfig := range s.chainConfigs {
+		curChain := chainConfig.chain
+		for valIdx := range curChain.Validators {
+			if _, ok := chainConfig.skipRunValidatorIndexes[valIdx]; ok {
+				continue
+			}
+
 			var opts docker.RemoveContainerOptions
-			opts.ID = s.valResources[chain.ChainMeta.Id][i].Container.ID
+			opts.ID = s.valResources[curChain.ChainMeta.Id][valIdx].Container.ID
 			opts.Force = true
 			s.dkrPool.Client.RemoveContainer(opts)
-			s.T().Logf("removed container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:])
+			s.T().Logf("removed container: %s", s.valResources[curChain.ChainMeta.Id][valIdx].Container.Name[1:])
 		}
 	}
-	s.upgradeContainers(s.chains[0])
-	s.upgradeContainers(s.chains[1])
+
+	// restart the containers with the upgraded, locally compiled version
+	for _, chainConfig := range s.chainConfigs {
+		s.upgradeContainers(chainConfig, chainConfig.propHeight)
+	}
 }

-func (s *IntegrationTestSuite) upgradeContainers(c *chain.Chain) {
+func (s *IntegrationTestSuite) upgradeContainers(chainConfig *chainConfig, propHeight int) {
 	// upgrade containers to the locally compiled daemon
-	s.T().Logf("starting upgrade for chain-id: %s...", c.ChainMeta.Id)
+	chain := chainConfig.chain
+	s.T().Logf("starting upgrade for chain-id: %s...", chain.ChainMeta.Id)
 	pwd, err := os.Getwd()
 	s.Require().NoError(err)
-	for i, val := range c.Validators {
+
+	for i, val := range chain.Validators {
+		if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
+			continue
+		}
+
 		runOpts := &dockertest.RunOptions{
 			Name:       val.Name,
-			Repository: "osmosis",
-			Tag:        "debug",
+			Repository: dockerconfig.LocalOsmoRepository,
+			Tag:        dockerconfig.LocalOsmoTag,
 			NetworkID:  s.dkrNet.Network.ID,
 			User:       "root:root",
 			Mounts: []string{
@@ -537,47 +562,51 @@
 		resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
 		s.Require().NoError(err)

-		s.valResources[c.ChainMeta.Id][i] = resource
+		s.valResources[chain.ChainMeta.Id][i] = resource
 		s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
 	}

 	// check that we are creating blocks again
-	for i := range c.Validators {
-		if c.ChainMeta.Id == chain.ChainAID {
-			propHeight = propHeightA
-		} else {
-			propHeight = propHeightB
+	for i := range chain.Validators {
+		if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
+			continue
 		}
+
 		s.Require().Eventually(
 			func() bool {
-				currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID)
+				currentHeight := s.getCurrentChainHeight(s.valResources[chain.ChainMeta.Id][i].Container.ID)
 				if currentHeight <= propHeight {
-					s.T().Logf("current block height on %s is %v, waiting to create blocks container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, s.valResources[c.ChainMeta.Id][i].Container.ID)
+					s.T().Logf("current block height on %s is %v, waiting to create blocks container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:], currentHeight, s.valResources[chain.ChainMeta.Id][i].Container.ID)
 				}
 				return currentHeight > propHeight
 			},
 			5*time.Minute,
 			time.Second,
 		)
-		s.T().Logf("upgrade successful on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
+		s.T().Logf("upgrade successful on %s validator container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:], s.valResources[chain.ChainMeta.Id][i].Container.ID)
 	}
-
 }

 func (s *IntegrationTestSuite) createPreUpgradeState() {
-	s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken)
-	s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken)
-	s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken)
-	s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken)
-	s.createPool(s.chains[0], "pool1A.json")
-	s.createPool(s.chains[1], "pool1B.json")
+	chainA := s.chainConfigs[0].chain
+	chainB := s.chainConfigs[1].chain
+
+	s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.OsmoToken)
+	s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.OsmoToken)
+	s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.StakeToken)
+	s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.StakeToken)
+	s.createPool(chainA, "pool1A.json")
+	s.createPool(chainB, "pool1B.json")
 }

 func (s *IntegrationTestSuite) runPostUpgradeTests() {
-	s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken)
-	s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken)
-	s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken)
-	s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken)
-	s.createPool(s.chains[0], "pool2A.json")
-	s.createPool(s.chains[1], "pool2B.json")
+	chainA := s.chainConfigs[0].chain
+	chainB := s.chainConfigs[1].chain
+
+	s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.OsmoToken)
+	s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.OsmoToken)
+	s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.StakeToken)
+	s.sendIBC(chainB, chainA, chainA.Validators[0].PublicAddress, chain.StakeToken)
+	s.createPool(chainA, "pool2A.json")
+	s.createPool(chainB, "pool2B.json")
 }
diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go
index f329cdcd80f..43030e83b5a 100644
--- a/tests/e2e/e2e_test.go
+++ b/tests/e2e/e2e_test.go
@@ -5,9 +5,10 @@ import (
 )

 func (s *IntegrationTestSuite) TestIBCTokenTransfer() {
-	s.Run("send_uosmo_to_chainB", func() {
-		// compare coins of reciever pre and post IBC send
-		// diff should only be the amount sent
-		s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken)
-	})
+	chainA := s.chainConfigs[0].chain
+	chainB := s.chainConfigs[1].chain
+
+	// compare coins of receiver pre and post IBC send
+	// diff should only be the amount sent
+	s.sendIBC(chainA, chainB, chainB.Validators[0].PublicAddress, chain.OsmoToken)
 }
diff --git a/tests/e2e/e2e_util_test.go b/tests/e2e/e2e_util_test.go
index 1a6c26f7870..f0324a81e6a 100644
--- a/tests/e2e/e2e_util_test.go
+++ b/tests/e2e/e2e_util_test.go
@@ -17,8 +17,8 @@ import (
 	"github.com/osmosis-labs/osmosis/v7/tests/e2e/util"
 )

-func (s *IntegrationTestSuite) connectIBCChains() {
-	s.T().Logf("connecting %s and %s chains via IBC", s.chains[0].ChainMeta.Id, s.chains[1].ChainMeta.Id)
+func (s *IntegrationTestSuite) connectIBCChains(chainA *chain.Chain, chainB *chain.Chain) {
+	s.T().Logf("connecting %s and %s chains via IBC", chainA.ChainMeta.Id, chainB.ChainMeta.Id)

 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 	defer cancel()
@@ -33,8 +33,8 @@
 			"hermes",
 			"create",
 			"channel",
-			s.chains[0].ChainMeta.Id,
-			s.chains[1].ChainMeta.Id,
+			chainA.ChainMeta.Id,
+			chainB.ChainMeta.Id,
 			"--port-a=transfer",
 			"--port-b=transfer",
 		},
@@ -63,7 +63,7 @@
 		"failed to connect chains via IBC: %s", errBuf.String(),
 	)

-	s.T().Logf("connected %s and %s chains via IBC", s.chains[0].ChainMeta.Id, s.chains[1].ChainMeta.Id)
+	s.T().Logf("connected %s and %s chains via IBC", chainA.ChainMeta.Id, chainB.ChainMeta.Id)
 }

 func (s *IntegrationTestSuite) sendIBC(srcChain *chain.Chain, dstChain *chain.Chain, recipient string, token sdk.Coin) {
@@ -225,12 +229,16 @@
 }

-func (s *IntegrationTestSuite) voteProposal(c *chain.Chain) {
+func (s *IntegrationTestSuite) voteProposal(chainConfig *chainConfig) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 	defer cancel()
+	chain := chainConfig.chain

-	s.T().Logf("voting for upgrade proposal for chain-id: %s", c.ChainMeta.Id)
-	for i := range c.Validators {
+	s.T().Logf("voting for upgrade proposal for chain-id: %s", chain.ChainMeta.Id)
+	for i := range chain.Validators {
+		if _, ok := chainConfig.skipRunValidatorIndexes[i]; ok {
+			continue
+		}

 		var (
 			outBuf bytes.Buffer
@@ -243,10 +247,10 @@
 			Context:      ctx,
 			AttachStdout: true,
 			AttachStderr: true,
-			Container:    s.valResources[c.ChainMeta.Id][i].Container.ID,
+			Container:    s.valResources[chain.ChainMeta.Id][i].Container.ID,
 			User:         "root",
 			Cmd: []string{
-				"osmosisd", "tx", "gov", "vote", "1", "yes", "--from=val", fmt.Sprintf("--chain-id=%s", c.ChainMeta.Id), "-b=block", "--yes", "--keyring-backend=test",
+				"osmosisd", "tx", "gov", "vote", "1", "yes", "--from=val", fmt.Sprintf("--chain-id=%s", chain.ChainMeta.Id), "-b=block", "--yes", "--keyring-backend=test",
 			},
 		})
 		s.Require().NoError(err)
@@ -264,7 +268,7 @@
 			"tx returned a non-zero code; stdout: %s, stderr: %s", outBuf.String(), errBuf.String(),
 		)

-		s.T().Logf("successfully voted for proposal from %s container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
+		s.T().Logf("successfully voted for proposal from %s container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:], s.valResources[chain.ChainMeta.Id][i].Container.ID)
 	}
 }
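
Reviewer note: the image wiring above can be exercised in isolation. The sketch below is not part of the patch; it mirrors how `SetupSuite` parses `OSMOSIS_E2E_SKIP_UPGRADE` and hands the result to `dockerconfig.NewImageConfig` (both taken from this diff). The `main` wrapper and printouts are illustrative only.

```go
package main

import (
	"fmt"
	"os"
	"strconv"

	dockerconfig "github.com/osmosis-labs/osmosis/v7/tests/e2e/docker"
)

func main() {
	// Mirrors SetupSuite: when OSMOSIS_E2E_SKIP_UPGRADE is unset or false,
	// the upgrade flow runs, so the suite starts from pre-upgrade images.
	var skipUpgrade bool
	if str := os.Getenv("OSMOSIS_E2E_SKIP_UPGRADE"); len(str) > 0 {
		parsed, err := strconv.ParseBool(str)
		if err != nil {
			panic(err)
		}
		skipUpgrade = parsed
	}

	images := dockerconfig.NewImageConfig(!skipUpgrade)
	fmt.Printf("init:    %s:%s\n", images.InitRepository, images.InitTag)
	fmt.Printf("osmosis: %s:%s\n", images.OsmosisRepository, images.OsmosisTag)
	fmt.Printf("relayer: %s:%s\n", images.RelayerRepository, images.RelayerTag)
}
```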
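The voting-period arithmetic in `configureChain` is also easy to sanity-check by hand. A minimal sketch follows, assuming `propDepositBlocks` is 10 (its definition falls outside the hunks shown above, so that value is a guess); the other constants come straight from the patch.

```go
package main

import (
	"fmt"
	"time"
)

const (
	propDepositBlocks float32 = 10 // assumed; defined outside the hunks shown
	propVoteBlocks    float32 = 1.2
	propBufferBlocks  float32 = 5
)

func main() {
	// Chain A now lists four validator configs, so numVal is 4.
	numVal := float32(4)
	votingPeriod := propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks

	// Same conversion the suite performs: truncate to whole seconds, then
	// scale to nanoseconds, since time.Duration counts nanoseconds.
	votingPeriodDuration := time.Duration(int(votingPeriod) * 1000000000)
	fmt.Println(votingPeriod, votingPeriodDuration) // 19.8 19s
}
```

Note that chain A's validator at index 3 is skipped at startup but still counts toward `numVal`, so the computed voting period simply carries a little extra slack for a validator that never votes.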