-
Notifications
You must be signed in to change notification settings - Fork 608
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
test: more e2e upgrade testing #1458
Changes from 20 commits
1dacf98
ce4415d
b66a0a1
383f2a4
d857d9d
1b3488e
0d90bab
afb2189
eaceee2
ff312db
78b2687
9cbea1a
5b50edb
f17c560
9f29989
dace939
d8ca1ae
08177e0
b020d98
81ff7ef
97858da
c4f55df
176344d
ddc5137
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -25,8 +25,27 @@ import ( | |
) | ||
|
||
var ( | ||
// common | ||
maxRetries = 10 // max retries for json unmarshalling | ||
// voting period for chain A | ||
votingPeriodA float32 | ||
// voting period for chain B | ||
votingPeriodB float32 | ||
// estimated number of blocks it takes to submit for a proposal | ||
propSubmitBlocks float32 = 10 | ||
// estimated number of blocks it takes to deposit for a proposal | ||
propDepositBlocks float32 = 10 | ||
// number of blocks it takes to vote for a single validator to vote for a proposal | ||
propVoteBlocks float32 = 1.2 | ||
// number of blocks used as a calculation buffer | ||
propBufferBlocks float32 = 5 | ||
// variable used to switch between chain A and B prop height in the for loop | ||
propHeight int | ||
// upgrade proposal height for chain A | ||
propHeightA int | ||
// upgrade proposal height for chain B | ||
propHeightB int | ||
// max retries for json unmarshalling | ||
maxRetries = 60 | ||
// the number of validator configs listed here determines how many validators will spawn on chain A and B respectively | ||
validatorConfigsChainA = []*chain.ValidatorConfig{ | ||
{ | ||
Pruning: "default", | ||
|
@@ -113,15 +132,18 @@ func (s *IntegrationTestSuite) SetupSuite() { | |
// 3. Run IBC relayer between the two chains. | ||
// 4. Execute various e2e tests, including IBC. | ||
s.configureDockerResources(chain.ChainAID, chain.ChainBID) | ||
|
||
s.configureChain(chain.ChainAID, validatorConfigsChainA) | ||
s.configureChain(chain.ChainBID, validatorConfigsChainB) | ||
|
||
s.runValidators(s.chains[0], 0) | ||
s.runValidators(s.chains[1], 10) | ||
s.runIBCRelayer() | ||
s.initUpgrade() | ||
// pre upgrade state creation | ||
s.createPreUpgradeState() | ||
// initialize and run the upgrade | ||
s.upgrade() | ||
// post upgrade tests | ||
s.runPostUpgradeTests() | ||
} | ||
|
||
func (s *IntegrationTestSuite) TearDownSuite() { | ||
|
@@ -156,14 +178,17 @@ func (s *IntegrationTestSuite) TearDownSuite() { | |
} | ||
|
||
func (s *IntegrationTestSuite) runValidators(c *chain.Chain, portOffset int) { | ||
s.T().Logf("starting Osmosis %s validator containers...", c.ChainMeta.Id) | ||
s.T().Logf("starting %s validator containers...", c.ChainMeta.Id) | ||
s.valResources[c.ChainMeta.Id] = make([]*dockertest.Resource, len(c.Validators)) | ||
pwd, err := os.Getwd() | ||
s.Require().NoError(err) | ||
for i, val := range c.Validators { | ||
runOpts := &dockertest.RunOptions{ | ||
Name: val.Name, | ||
NetworkID: s.dkrNet.Network.ID, | ||
Mounts: []string{ | ||
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir), | ||
fmt.Sprintf("%s/scripts:/osmosis", pwd), | ||
}, | ||
Repository: "osmolabs/osmosis-dev", | ||
Tag: "v7.2.1-debug", | ||
|
@@ -192,7 +217,7 @@ func (s *IntegrationTestSuite) runValidators(c *chain.Chain, portOffset int) { | |
s.Require().NoError(err) | ||
|
||
s.valResources[c.ChainMeta.Id][i] = resource | ||
s.T().Logf("started Osmosis %s validator container: %s", c.ChainMeta.Id, resource.Container.ID) | ||
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID) | ||
p0mvn marked this conversation as resolved.
Show resolved
Hide resolved
|
||
} | ||
|
||
rpcClient, err := rpchttp.New("tcp://localhost:26657", "/websocket") | ||
|
@@ -327,16 +352,27 @@ func (s *IntegrationTestSuite) configureChain(chainId string, validatorConfigs [ | |
b, err := json.Marshal(validatorConfigs) | ||
s.Require().NoError(err) | ||
|
||
numVal := float32(len(validatorConfigs)) | ||
// voting period is the number of blocks it takes to deposit, 1.2 seconds per validator to vote on the prop, then a buffer | ||
votingPeriodNum := propDepositBlocks + numVal*propVoteBlocks + propBufferBlocks | ||
if chainId == chain.ChainAID { | ||
votingPeriodA = votingPeriodNum | ||
} else if chainId == chain.ChainBID { | ||
votingPeriodB = votingPeriodNum | ||
} | ||
votingPeriod := time.Duration(int(votingPeriodNum) * 1000000000) | ||
|
||
s.initResource, err = s.dkrPool.RunWithOptions( | ||
&dockertest.RunOptions{ | ||
Name: fmt.Sprintf("%s", chainId), | ||
Repository: "osmolabs/osmosis-init", | ||
Tag: "v7.3.0", | ||
Tag: "v7.3.0-1", | ||
NetworkID: s.dkrNet.Network.ID, | ||
Cmd: []string{ | ||
fmt.Sprintf("--data-dir=%s", tmpDir), | ||
fmt.Sprintf("--chain-id=%s", chainId), | ||
fmt.Sprintf("--config=%s", b), | ||
fmt.Sprintf("--voting-period=%v", votingPeriod), | ||
}, | ||
User: "root:root", | ||
Mounts: []string{ | ||
|
@@ -391,84 +427,128 @@ func noRestart(config *docker.HostConfig) { | |
} | ||
} | ||
|
||
func (s *IntegrationTestSuite) initUpgrade() { | ||
func (s *IntegrationTestSuite) upgrade() { | ||
// submit, deposit, and vote for upgrade proposal | ||
s.submitProposal(s.chains[0]) | ||
s.submitProposal(s.chains[1]) | ||
// prop height = current height + voting period + time it takes to submit proposal + small buffer | ||
currentHeightA := s.getCurrentChainHeight(s.valResources[s.chains[0].ChainMeta.Id][0].Container.ID) | ||
propHeightA = currentHeightA + int(votingPeriodA) + int(propSubmitBlocks) + int(propBufferBlocks) | ||
s.submitProposal(s.chains[0], propHeightA) | ||
s.depositProposal(s.chains[0]) | ||
s.depositProposal(s.chains[1]) | ||
s.voteProposal(s.chains[0]) | ||
// prop height = current height + voting period + time it takes to submit proposal + small buffer | ||
currentHeightB := s.getCurrentChainHeight(s.valResources[s.chains[1].ChainMeta.Id][0].Container.ID) | ||
propHeightB = currentHeightB + int(votingPeriodB) + int(propSubmitBlocks) + int(propBufferBlocks) | ||
s.submitProposal(s.chains[1], propHeightB) | ||
s.depositProposal(s.chains[1]) | ||
s.voteProposal(s.chains[1]) | ||
|
||
// wait till all chains halt at upgrade height | ||
for _, chain := range s.chains { | ||
for i := range chain.Validators { | ||
s.T().Logf("waiting to reach upgrade height on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
for _, c := range s.chains { | ||
if c.ChainMeta.Id == chain.ChainAID { | ||
propHeight = propHeightA | ||
} else { | ||
propHeight = propHeightB | ||
} | ||
for i := range c.Validators { | ||
// use counter to ensure no new blocks are being created | ||
counter := 0 | ||
s.T().Logf("waiting to reach upgrade height on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
s.Require().Eventually( | ||
func() bool { | ||
out := s.chainStatus(s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
var syncInfo syncInfo | ||
json.Unmarshal(out, &syncInfo) | ||
if syncInfo.SyncInfo.LatestHeight != "75" { | ||
s.T().Logf("current block height is %v, waiting for block 75 container: %s", syncInfo.SyncInfo.LatestHeight, s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
if currentHeight != propHeight { | ||
s.T().Logf("current block height on %s is %v, waiting for block %v container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, propHeight, s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
} | ||
if currentHeight > propHeight { | ||
panic("chain did not halt at upgrade height") | ||
} | ||
return syncInfo.SyncInfo.LatestHeight == "75" | ||
if currentHeight == propHeight { | ||
counter++ | ||
} | ||
return counter == 3 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. What is the purpose for having both a counter and a 5 minute timeout? By having a counter running 3 times max every second , we essentially have a 3 second timeout which is much lower than There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The purpose of this was to return True 3 times before accepting it as fact. For instance if upgrade height is 72 its possible to return 72 twice and still hit 73 but 3 seconds is too long to hit the same block 3 times in a row without it having been stopped for upgrade. But this is now moot since we will use a hard chain halt height as suggested by you in your previous comment There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think that my earlier proposed solution would functionally the same way as right now, just without a counter. But yeah, if it's going to get change - it doesn't matter :) |
||
}, | ||
2*time.Minute, | ||
5*time.Second, | ||
5*time.Minute, | ||
time.Second, | ||
) | ||
s.T().Logf("reached upgrade height on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
s.T().Logf("reached upgrade height on %s container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
} | ||
} | ||
|
||
// remove all containers so we can upgrade them to the new version | ||
for _, chain := range s.chains { | ||
for i := range chain.Validators { | ||
s.Require().NoError(s.dkrPool.RemoveContainerByName(s.valResources[chain.ChainMeta.Id][i].Container.Name)) | ||
var opts docker.RemoveContainerOptions | ||
opts.ID = s.valResources[chain.ChainMeta.Id][i].Container.ID | ||
opts.Force = true | ||
s.dkrPool.Client.RemoveContainer(opts) | ||
s.T().Logf("removed container: %s", s.valResources[chain.ChainMeta.Id][i].Container.Name[1:]) | ||
} | ||
} | ||
s.upgradeContainers(s.chains[0]) | ||
s.upgradeContainers(s.chains[1]) | ||
} | ||
|
||
func (s *IntegrationTestSuite) upgrade() { | ||
func (s *IntegrationTestSuite) upgradeContainers(c *chain.Chain) { | ||
// upgrade containers to the locally compiled daemon | ||
for _, chain := range s.chains { | ||
s.T().Logf("starting upgrade for chain-id: %s...", chain.ChainMeta.Id) | ||
for i, val := range chain.Validators { | ||
runOpts := &dockertest.RunOptions{ | ||
Name: val.Name, | ||
Repository: "osmosis", | ||
Tag: "debug", | ||
NetworkID: s.dkrNet.Network.ID, | ||
User: "root:root", | ||
Mounts: []string{ | ||
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir), | ||
}, | ||
} | ||
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart) | ||
s.Require().NoError(err) | ||
|
||
s.valResources[chain.ChainMeta.Id][i] = resource | ||
s.T().Logf("started Osmosis %s validator container: %s", chain.ChainMeta.Id, resource.Container.ID) | ||
s.T().Logf("starting upgrade for chain-id: %s...", c.ChainMeta.Id) | ||
pwd, err := os.Getwd() | ||
s.Require().NoError(err) | ||
for i, val := range c.Validators { | ||
runOpts := &dockertest.RunOptions{ | ||
Name: val.Name, | ||
Repository: "osmosis", | ||
Tag: "debug", | ||
NetworkID: s.dkrNet.Network.ID, | ||
User: "root:root", | ||
Mounts: []string{ | ||
fmt.Sprintf("%s/:/osmosis/.osmosisd", val.ConfigDir), | ||
fmt.Sprintf("%s/scripts:/osmosis", pwd), | ||
}, | ||
} | ||
resource, err := s.dkrPool.RunWithOptions(runOpts, noRestart) | ||
s.Require().NoError(err) | ||
|
||
s.valResources[c.ChainMeta.Id][i] = resource | ||
s.T().Logf("started %s validator container: %s", resource.Container.Name[1:], resource.Container.ID) | ||
} | ||
|
||
// check that we are hitting blocks again | ||
for _, chain := range s.chains { | ||
for i := range chain.Validators { | ||
s.Require().Eventually( | ||
func() bool { | ||
out := s.chainStatus(s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
var syncInfo syncInfo | ||
json.Unmarshal(out, &syncInfo) | ||
if syncInfo.SyncInfo.LatestHeight <= "75" { | ||
fmt.Printf("current block height is %v, waiting to hit blocks\n", syncInfo.SyncInfo.LatestHeight) | ||
} | ||
return syncInfo.SyncInfo.LatestHeight > "75" | ||
}, | ||
2*time.Minute, | ||
5*time.Second, | ||
) | ||
s.T().Logf("upgrade successful on %s validator container: %s", chain.ChainMeta.Id, s.valResources[chain.ChainMeta.Id][i].Container.ID) | ||
// check that we are creating blocks again | ||
for i := range c.Validators { | ||
if c.ChainMeta.Id == chain.ChainAID { | ||
propHeight = propHeightA | ||
} else { | ||
propHeight = propHeightB | ||
} | ||
s.Require().Eventually( | ||
func() bool { | ||
currentHeight := s.getCurrentChainHeight(s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
if currentHeight <= propHeight { | ||
s.T().Logf("current block height on %s is %v, waiting to create blocks container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], currentHeight, s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
} | ||
return currentHeight > propHeight | ||
}, | ||
5*time.Minute, | ||
time.Second, | ||
) | ||
s.T().Logf("upgrade successful on %s validator container: %s", s.valResources[c.ChainMeta.Id][i].Container.Name[1:], s.valResources[c.ChainMeta.Id][i].Container.ID) | ||
} | ||
|
||
} | ||
|
||
func (s *IntegrationTestSuite) createPreUpgradeState() { | ||
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken) | ||
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken) | ||
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken) | ||
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken) | ||
s.createPool(s.chains[0], "pool1A.json") | ||
s.createPool(s.chains[1], "pool1B.json") | ||
} | ||
|
||
func (s *IntegrationTestSuite) runPostUpgradeTests() { | ||
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.OsmoToken) | ||
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.OsmoToken) | ||
s.sendIBC(s.chains[0], s.chains[1], s.chains[1].Validators[0].PublicAddress, chain.StakeToken) | ||
s.sendIBC(s.chains[1], s.chains[0], s.chains[0].Validators[0].PublicAddress, chain.StakeToken) | ||
s.createPool(s.chains[0], "pool2A.json") | ||
s.createPool(s.chains[1], "pool2B.json") | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
In the future, this can get messy. We should revise this such that we can instead pass the entire module genesis state. But for now, this is OK.