test: more e2e upgrade testing #1458

Merged
merged 24 commits into from
May 16, 2022

Changes from 20 commits
24 commits
1dacf98  chain init changes (czarcas7ic, May 5, 2022)
ce4415d  introduce val init options (czarcas7ic, May 5, 2022)
b66a0a1  Apply suggestions from code review (czarcas7ic, May 6, 2022)
383f2a4  chain init change (czarcas7ic, May 6, 2022)
d857d9d  using new struc (czarcas7ic, May 6, 2022)
1b3488e  Merge branch 'main' into adam/init-changes-2 (czarcas7ic, May 6, 2022)
0d90bab  Update tests/e2e/e2e_setup_test.go (czarcas7ic, May 6, 2022)
afb2189  lint change (czarcas7ic, May 6, 2022)
eaceee2  address roman comments (czarcas7ic, May 6, 2022)
ff312db  move comments to chain package (czarcas7ic, May 7, 2022)
78b2687  new e2e upgrade features (czarcas7ic, May 10, 2022)
9cbea1a  Merge branch 'main' into adam/e2e-changes-2 (czarcas7ic, May 10, 2022)
5b50edb  pool file permissions fix (czarcas7ic, May 10, 2022)
f17c560  Apply suggestions from code review (czarcas7ic, May 10, 2022)
9f29989  edits from code review (czarcas7ic, May 10, 2022)
dace939  Merge branch 'main' into adam/e2e-changes-2 (czarcas7ic, May 10, 2022)
d8ca1ae  Apply suggestions from code review (czarcas7ic, May 10, 2022)
08177e0  e2e test rework (czarcas7ic, May 12, 2022)
b020d98  increase destination time check (czarcas7ic, May 12, 2022)
81ff7ef  Merge branch 'main' into adam/e2e-changes-2 (czarcas7ic, May 12, 2022)
97858da  Merge branch 'main' into adam/e2e-changes-2 (ValarDragon, May 16, 2022)
c4f55df  change upgrade from v8 to v9 (czarcas7ic, May 16, 2022)
176344d  Merge branch 'main' into adam/e2e-changes-2 (czarcas7ic, May 16, 2022)
ddc5137  Merge branch 'main' into adam/e2e-changes-2 (czarcas7ic, May 16, 2022)

11 changes: 7 additions & 4 deletions tests/e2e/chain/config.go
@@ -5,6 +5,7 @@ import (
"fmt"
"path/filepath"
"strings"
"time"

"github.com/cosmos/cosmos-sdk/server"
srvconfig "github.com/cosmos/cosmos-sdk/server/config"
@@ -33,10 +34,10 @@ const (
// common
OsmoDenom = "uosmo"
StakeDenom = "stake"
IbcDenom = "ibc/ED07A3391A112B175915CD8FAF43A2DA8E4790EDE12566649D0C2F97716B8518"
OsmoIBCDenom = "ibc/ED07A3391A112B175915CD8FAF43A2DA8E4790EDE12566649D0C2F97716B8518"
StakeIBCDenom = "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B7787"
MinGasPrice = "0.000"
IbcSendAmount = 3300000000
VotingPeriod = 30000000000 // 30 seconds
// chainA
ChainAID = "osmo-test-a"
OsmoBalanceA = 200000000000
@@ -57,6 +58,8 @@ var (

InitBalanceStrA = fmt.Sprintf("%d%s,%d%s", OsmoBalanceA, OsmoDenom, StakeBalanceA, StakeDenom)
InitBalanceStrB = fmt.Sprintf("%d%s,%d%s", OsmoBalanceB, OsmoDenom, StakeBalanceB, StakeDenom)
OsmoToken = sdk.NewInt64Coin(OsmoDenom, IbcSendAmount) // 3,300,000,000 uosmo (3,300 OSMO)
StakeToken = sdk.NewInt64Coin(StakeDenom, IbcSendAmount) // 3,300,000,000 ustake
)

func addAccount(path, moniker, amountStr string, accAddr sdk.AccAddress) error {
@@ -130,7 +133,7 @@ func addAccount(path, moniker, amountStr string, accAddr sdk.AccAddress) error {
return genutil.ExportGenesisFile(genDoc, genFile)
}

func initGenesis(c *internalChain) error {
func initGenesis(c *internalChain, votingPeriod time.Duration) error {
Contributor

In the future, this can get messy. We should revise this such that we can instead pass the entire module genesis state. But for now, this is OK.

serverCtx := server.NewDefaultContext()
config := serverCtx.Config

@@ -174,7 +177,7 @@ func initGenesis(c *internalChain) error {
}

govGenState.VotingParams = govtypes.VotingParams{
VotingPeriod: VotingPeriod,
VotingPeriod: votingPeriod,
}

gz, err := util.Cdc.MarshalJSON(&govGenState)
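Following up on the reviewer comment above: a rough sketch, not code from this PR, of what passing the entire gov module genesis state into the chain package could look like. It assumes the package's existing internalChain type, util.Cdc codec, and govtypes import; the names initGenesisWithGovState and writeGovGenesisEntry are hypothetical.

```go
// Hypothetical variant of initGenesis that accepts the whole gov genesis
// state, so future tests can tweak deposit, tally, or voting params without
// adding more arguments.
func initGenesisWithGovState(c *internalChain, govGenState govtypes.GenesisState) error {
	gz, err := util.Cdc.MarshalJSON(&govGenState)
	if err != nil {
		return fmt.Errorf("failed to marshal gov genesis state: %w", err)
	}
	// Write gz into the "gov" entry of the app genesis state, reusing the
	// same genesis-file plumbing initGenesis already uses for the voting period.
	return writeGovGenesisEntry(c, gz) // hypothetical helper
}
```
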
6 changes: 4 additions & 2 deletions tests/e2e/chain/main.go
@@ -1,14 +1,16 @@
package chain

func Init(id, dataDir string, validatorConfigs []*ValidatorConfig) (*Chain, error) {
import "time"

func Init(id, dataDir string, validatorConfigs []*ValidatorConfig, votingPeriod time.Duration) (*Chain, error) {
chain, err := new(id, dataDir)
if err != nil {
return nil, err
}
if err := initNodes(chain, len(validatorConfigs)); err != nil {
return nil, err
}
if err := initGenesis(chain); err != nil {
if err := initGenesis(chain, votingPeriod); err != nil {
return nil, err
}
if err := initValidatorConfigs(chain, validatorConfigs); err != nil {
13 changes: 8 additions & 5 deletions tests/e2e/chain_init/main.go
@@ -5,21 +5,24 @@ import (
"flag"
"fmt"
"os"
"time"

"github.com/osmosis-labs/osmosis/v7/tests/e2e/chain"
)

func main() {
var (
valConfig []*chain.ValidatorConfig
dataDir string
chainId string
config string
valConfig []*chain.ValidatorConfig
dataDir string
chainId string
config string
votingPeriod time.Duration
)

flag.StringVar(&dataDir, "data-dir", "", "chain data directory")
flag.StringVar(&chainId, "chain-id", "", "chain ID")
flag.StringVar(&config, "config", "", "serialized config")
flag.DurationVar(&votingPeriod, "voting-period", 30000000000, "voting period")

flag.Parse()

@@ -36,7 +39,7 @@ func main() {
panic(err)
}

createdChain, err := chain.Init(chainId, dataDir, valConfig)
createdChain, err := chain.Init(chainId, dataDir, valConfig, votingPeriod)
if err != nil {
panic(err)
}
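A small standalone sketch, not part of the PR, of why the new flag composes with how the e2e setup passes it: formatting a time.Duration with %v produces a string like "39s", which flag.DurationVar parses back into the same value. The flag set name below is arbitrary.

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	votingPeriod := 39 * time.Second
	arg := fmt.Sprintf("--voting-period=%v", votingPeriod) // "--voting-period=39s"

	var parsed time.Duration
	fs := flag.NewFlagSet("chain_init", flag.ExitOnError)
	fs.DurationVar(&parsed, "voting-period", 30*time.Second, "voting period")
	_ = fs.Parse([]string{arg})

	fmt.Println(parsed == votingPeriod) // true
}
```
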
200 changes: 140 additions & 60 deletions tests/e2e/e2e_setup_test.go
@@ -25,8 +25,27 @@ import (
)

var (
// common
maxRetries = 10 // max retries for json unmarshalling
// voting period for chain A
votingPeriodA float32
// voting period for chain B
votingPeriodB float32
// estimated number of blocks it takes to submit a proposal
propSubmitBlocks float32 = 10
// estimated number of blocks it takes to deposit on a proposal
propDepositBlocks float32 = 10
// number of blocks it takes for a single validator to vote on a proposal
propVoteBlocks float32 = 1.2
// number of blocks used as a calculation buffer
propBufferBlocks float32 = 5
// variable used to switch between chain A and B prop height in for loop
propHeight int
// upgrade proposal height for chain A
propHeightA int
// upgrade proposal height for chain B
propHeightB int
// max retries for json unmarshalling
maxRetries = 60
// the number of validator configs listed here determines how many validators will spawn on chain A and B respectively
validatorConfigsChainA = []*chain.ValidatorConfig{
{
Pruning: "default",
@@ -113,15 +132,18 @@ func (s *IntegrationTestSuite) SetupSuite() {
// 3. Run IBC relayer between the two chains.
// 4. Execute various e2e tests, including IBC.
s.configureDockerResources(chain.ChainAID, chain.ChainBID)

s.configureChain(chain.ChainAID, validatorConfigsChainA)
s.configureChain(chain.ChainBID, validatorConfigsChainB)

s.runValidators(s.chains[0], 0)
s.runValidators(s.chains[1], 10)
s.runIBCRelayer()
s.initUpgrade()
// pre upgrade state creation
s.createPreUpgradeState()
// initialize and run the upgrade
s.upgrade()
// post upgrade tests
s.runPostUpgradeTests()
}

func (s *IntegrationTestSuite) TearDownSuite() {
@@ -156,14 +178,17 @@ func (s *IntegrationTestSuite) TearDownSuite() {
}

func (s *IntegrationTestSuite) runValidators(c *chain.Chain, portOffset int) {
s.T().Logf("starting Osmosis %s validator containers...", c.ChainMeta.Id)
s.T().Logf("starting %s validator containers...", c.ChainMeta.Id)
s.valResources[c.ChainMeta.Id] = make([]*dockertest.Resource, len(c.Validators))
pwd, err := os.Getwd()
s.Require().NoError(err)
for i, val := range c.Validators {
runOpts := &dockertest.RunOptions{
Name: val.Name,
NetworkID: s.dkrNet.Network.ID,
Mounts: []string{
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
fmt.Sprintf("%s/scripts:/osmosis", pwd),
},
Repository: "osmolabs/osmosis-dev",
Tag: "v7.2.1-debug",
@@ -192,7 +217,7 @@ func (s *IntegrationTestSuite) runValidators(c *chain.Chain, portOffset int) {
s.Require().NoError(err)

s.valResources[c.ChainMeta.Id][i] = resource
s.T().Logf("started Osmosis %s validator container: %s", c.ChainMeta.Id, resource.Container.ID)
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
}

rpcClient, err := rpchttp.New("tcp://localhost:26657", "/websocket")
@@ -327,16 +352,27 @@ func (s *IntegrationTestSuite) configureChain(chainId string, validatorConfigs [
b, err := json.Marshal(validatorConfigs)
s.Require().NoError(err)

numVal := float32(len(validatorConfigs))
// voting period is number of blocks it takes to deposit, 1.2 seconds per validator to vote on the prop, then a buffer
votingPeriodNum := propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks
if chainId == chain.ChainAID {
votingPeriodA = votingPeriodNum
} else if chainId == chain.ChainBID {
votingPeriodB = votingPeriodNum
}
votingPeriod := time.Duration(int(votingPeriodNum) * 1000000000)

s.initResource, err = s.dkrPool.RunWithOptions(
&dockertest.RunOptions{
Name: fmt.Sprintf("%s", chainId),
Repository: "osmolabs/osmosis-init",
Tag: "v7.3.0",
Tag: "v7.3.0-1",
NetworkID: s.dkrNet.Network.ID,
Cmd: []string{
fmt.Sprintf("--data-dir=%s", tmpDir),
fmt.Sprintf("--chain-id=%s", chainId),
fmt.Sprintf("--config=%s", b),
fmt.Sprintf("--voting-period=%v", votingPeriod),
},
User: "root:root",
Mounts: []string{
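
A worked example of the voting-period arithmetic in configureChain above and of the upgrade proposal height computed further down, assuming a hypothetical 4-validator chain at a current height of 30; the real values come from the validator configs and a live height query, both outside this excerpt.

```go
package e2e

import "time"

func exampleVotingMath() (time.Duration, int) {
	const (
		propSubmitBlocks  float32 = 10
		propDepositBlocks float32 = 10
		propVoteBlocks    float32 = 1.2
		propBufferBlocks  float32 = 5
	)
	numVal := float32(4) // hypothetical validator count

	// deposit window + per-validator voting time + buffer = 10 + 4*1.2 + 5 = 19.8 blocks
	votingPeriodNum := propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks
	// one block is roughly one second; truncating gives a 19s voting period
	votingPeriod := time.Duration(int(votingPeriodNum) * 1_000_000_000)

	// upgrade proposal height = current height + voting period + submit time + buffer
	currentHeight := 30
	propHeight := currentHeight + int(votingPeriodNum) + int(propSubmitBlocks) + int(propBufferBlocks) // 30 + 19 + 10 + 5 = 64

	return votingPeriod, propHeight
}
```
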
@@ -391,84 +427,128 @@ func noRestart(config *docker.HostConfig) {
}
}

func (s *IntegrationTestSuite) initUpgrade() {
func (s *IntegrationTestSuite) upgrade() {
// submit, deposit, and vote for upgrade proposal
s.submitProposal(s.chains[0])
s.submitProposal(s.chains[1])
// prop height = current height + voting period + time it takes to submit proposal + small buffer
currentHeightA := s.getCurrentChainHeight(s.valResources[s.chains[0].ChainMeta.Id][0].Container.ID)
propHeightA = currentHeightA + int(votingPeriodA) + int(propSubmitBlocks) + int(propBufferBlocks)
s.submitProposal(s.chains[0], propHeightA)
s.depositProposal(s.chains[0])
s.depositProposal(s.chains[1])
s.voteProposal(s.chains[0])
// prop height = current height + voting period + time it takes to submit proposal + small buffer
currentHeightB := s.getCurrentChainHeight(s.valResources[s.chains[1].ChainMeta.Id][0].Container.ID)
propHeightB = currentHeightB + int(votingPeriodB) + int(propSubmitBlocks) + int(propBufferBlocks)
s.submitProposal(s.chains[1], propHeightB)
s.depositProposal(s.chains[1])
s.voteProposal(s.chains[1])

// wait till all chains halt at upgrade height
for _, chain := range s.chains {
for i := range chain.Validators {
s.T().Logf("waiting to reach upgrade height on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID)
for _, c := range s.chains {
if c.ChainMeta.Id == chain.ChainAID {
propHeight = propHeightA
} else {
propHeight = propHeightB
}
for i := range c.Validators {
// use counter to ensure no new blocks are being created
counter := 0
s.T().Logf("waiting to reach upgrade height on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
s.Require().Eventually(
func() bool {
out := s.chainStatus(s.valResources[chain.ChainMeta.Id][i].Container.ID)
var syncInfo syncInfo
json.Unmarshal(out, &syncInfo)
if syncInfo.SyncInfo.LatestHeight != "75" {
s.T().Logf("current block height is %v, waiting for block 75 container: %s", syncInfo.SyncInfo.LatestHeight, s.valResources[chain.ChainMeta.Id][i].Container.ID)
currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID)
if currentHeight != propHeight {
s.T().Logf("current block height on %s is %v, waiting for block %v container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, propHeight, s.valResources[c.ChainMeta.Id][i].Container.ID)
}
if currentHeight > propHeight {
panic("chain did not halt at upgrade height")
}
return syncInfo.SyncInfo.LatestHeight == "75"
if currentHeight == propHeight {
counter++
}
return counter == 3
Member

What is the purpose of having both a counter and a 5-minute timeout?

By having a counter that runs at most 3 times, once every second, we essentially have a 3-second timeout, which is much lower than 5*time.Minute. We can then remove the counter, always return true, and change the timeout from 5*time.Minute to 3*time.Second.

Member Author

The purpose of this was to return true 3 times before accepting it as fact. For instance, if the upgrade height is 72, it's possible to return 72 twice and still hit 73, but 3 seconds is too long to hit the same block 3 times in a row without the chain having been stopped for the upgrade. This is now moot, though, since we will use a hard chain halt height as you suggested in your previous comment.

Member

I think that my earlier proposed solution would function the same way as the current one, just without a counter. But yeah, if it's going to get changed, it doesn't matter :)
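
To make the counter discussion above concrete, here is a standalone sketch of the pattern in this diff, assuming a hypothetical heightFn; the test itself uses s.Require().Eventually with getCurrentChainHeight. The idea is to treat the chain as halted only after the target height has been observed on three one-second polls.

```go
package e2e

import "time"

// waitForHalt polls once per second until heightFn has reported the target
// height three times, the height overshoots the target, or the timeout expires.
func waitForHalt(heightFn func() int, target int, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	counter := 0
	for time.Now().Before(deadline) {
		switch h := heightFn(); {
		case h > target:
			return false // chain produced blocks past the upgrade height
		case h == target:
			counter++
			if counter == 3 {
				return true
			}
		}
		time.Sleep(time.Second)
	}
	return false
}
```
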
},
2*time.Minute,
5*time.Second,
5*time.Minute,
time.Second,
)
s.T().Logf("reached upgrade height on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID)
s.T().Logf("reached upgrade height on %s container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
}
}

// remove all containers so we can upgrade them to the new version
for _, chain := range s.chains {
for i := range chain.Validators {
s.Require().NoError(s.dkrPool.RemoveContainerByName(s.valResources[chain.ChainMeta.Id][i].Container.Name))
var opts docker.RemoveContainerOptions
opts.ID = s.valResources[chain.ChainMeta.Id][i].Container.ID
opts.Force = true
s.dkrPool.Client.RemoveContainer(opts)
s.T().Logf("removed container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:])
}
}
s.upgradeContainers(s.chains[0])
s.upgradeContainers(s.chains[1])
}

func (s *IntegrationTestSuite) upgrade() {
func (s *IntegrationTestSuite) upgradeContainers(c *chain.Chain) {
// upgrade containers to the locally compiled daemon
for _, chain := range s.chains {
s.T().Logf("starting upgrade for chain-id: %s...", chain.ChainMeta.Id)
for i, val := range chain.Validators {
runOpts := &dockertest.RunOptions{
Name: val.Name,
Repository: "osmosis",
Tag: "debug",
NetworkID: s.dkrNet.Network.ID,
User: "root:root",
Mounts: []string{
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
},
}
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
s.Require().NoError(err)

s.valResources[chain.ChainMeta.Id][i] = resource
s.T().Logf("started Osmosis %s validator container: %s", chain.ChainMeta.Id, resource.Container.ID)
s.T().Logf("starting upgrade for chain-id: %s...", c.ChainMeta.Id)
pwd, err := os.Getwd()
s.Require().NoError(err)
for i, val := range c.Validators {
runOpts := &dockertest.RunOptions{
Name: val.Name,
Repository: "osmosis",
Tag: "debug",
NetworkID: s.dkrNet.Network.ID,
User: "root:root",
Mounts: []string{
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir),
fmt.Sprintf("%s/scripts:/osmosis", pwd),
},
}
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart)
s.Require().NoError(err)

s.valResources[c.ChainMeta.Id][i] = resource
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID)
}

// check that we are hitting blocks again
for _, chain := range s.chains {
for i := range chain.Validators {
s.Require().Eventually(
func() bool {
out := s.chainStatus(s.valResources[chain.ChainMeta.Id][i].Container.ID)
var syncInfo syncInfo
json.Unmarshal(out, &syncInfo)
if syncInfo.SyncInfo.LatestHeight <= "75" {
fmt.Printf("current block height is %v, waiting to hit blocks\n", syncInfo.SyncInfo.LatestHeight)
}
return syncInfo.SyncInfo.LatestHeight > "75"
},
2*time.Minute,
5*time.Second,
)
s.T().Logf("upgrade successful on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID)
// check that we are creating blocks again
for i := range c.Validators {
if c.ChainMeta.Id == chain.ChainAID {
propHeight = propHeightA
} else {
propHeight = propHeightB
}
s.Require().Eventually(
func() bool {
currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID)
if currentHeight <= propHeight {
s.T().Logf("current block height on %s is %v, waiting to create blocks container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, s.valResources[c.ChainMeta.Id][i].Container.ID)
}
return currentHeight > propHeight
},
5*time.Minute,
time.Second,
)
s.T().Logf("upgrade successful on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID)
}

}

func (s *IntegrationTestSuite) createPreUpgradeState() {
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken)
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken)
s.createPool(s.chains[0], "pool1A.json")
s.createPool(s.chains[1], "pool1B.json")
}

func (s *IntegrationTestSuite) runPostUpgradeTests() {
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken)
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken)
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken)
s.createPool(s.chains[0], "pool2A.json")
s.createPool(s.chains[1], "pool2B.json")
}