diff --git a/chain/cosmos/broadcaster.go b/chain/cosmos/broadcaster.go index 294e77c1a..9ace40b04 100644 --- a/chain/cosmos/broadcaster.go +++ b/chain/cosmos/broadcaster.go @@ -107,7 +107,7 @@ func (b *Broadcaster) GetClientContext(ctx context.Context, user User) (client.C if !ok { localDir := b.t.TempDir() containerKeyringDir := path.Join(cn.HomeDir(), "keyring-test") - kr, err := dockerutil.NewLocalKeyringFromDockerContainer(ctx, cn.DockerClient, localDir, containerKeyringDir, cn.containerID) + kr, err := dockerutil.NewLocalKeyringFromDockerContainer(ctx, cn.DockerClient, localDir, containerKeyringDir, cn.containerLifecycle.ContainerID()) if err != nil { return client.Context{}, err } diff --git a/chain/cosmos/chain_node.go b/chain/cosmos/chain_node.go index 6c749ca42..8f4a1564a 100644 --- a/chain/cosmos/chain_node.go +++ b/chain/cosmos/chain_node.go @@ -28,11 +28,7 @@ import ( "github.com/cosmos/cosmos-sdk/types" paramsutils "github.com/cosmos/cosmos-sdk/x/params/client/utils" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - dockertypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" dockerclient "github.com/docker/docker/client" - "github.com/docker/docker/errdefs" "github.com/docker/go-connections/nat" "github.com/strangelove-ventures/interchaintest/v7/ibc" "github.com/strangelove-ventures/interchaintest/v7/internal/blockdb" @@ -57,7 +53,7 @@ type ChainNode struct { lock sync.Mutex log *zap.Logger - containerID string + containerLifecycle *dockerutil.ContainerLifecycle // Ports set during StartContainer. 
hostRPCPort string @@ -66,6 +62,25 @@ type ChainNode struct { preStartListeners dockerutil.Listeners } +func NewChainNode(log *zap.Logger, validator bool, chain *CosmosChain, dockerClient *dockerclient.Client, networkID string, testName string, image ibc.DockerImage, index int) *ChainNode { + tn := &ChainNode{ + log: log, + + Validator: validator, + + Chain: chain, + DockerClient: dockerClient, + NetworkID: networkID, + TestName: testName, + Image: image, + Index: index, + } + + tn.containerLifecycle = dockerutil.NewContainerLifecycle(log, dockerClient, tn.Name()) + + return tn +} + // ChainNodes is a collection of ChainNode type ChainNodes []*ChainNode @@ -925,81 +940,28 @@ func (tn *ChainNode) UnsafeResetAll(ctx context.Context) error { func (tn *ChainNode) CreateNodeContainer(ctx context.Context) error { chainCfg := tn.Chain.Config() - cmd := []string{chainCfg.Bin, "start", "--home", tn.HomeDir(), "--x-crisis-skip-assert-invariants"} + + var cmd []string if chainCfg.NoHostMount { cmd = []string{"sh", "-c", fmt.Sprintf("cp -r %s %s_nomnt && %s start --home %s_nomnt --x-crisis-skip-assert-invariants", tn.HomeDir(), tn.HomeDir(), chainCfg.Bin, tn.HomeDir())} + } else { + cmd = []string{chainCfg.Bin, "start", "--home", tn.HomeDir(), "--x-crisis-skip-assert-invariants"} } - imageRef := tn.Image.Ref() - tn.logger(). 
- Info("Running command", - zap.String("command", strings.Join(cmd, " ")), - zap.String("container", tn.Name()), - zap.String("image", imageRef), - ) - - pb, listeners, err := dockerutil.GeneratePortBindings(sentryPorts) - if err != nil { - return fmt.Errorf("failed to generate port bindings: %w", err) - } - - tn.preStartListeners = listeners - - cc, err := tn.DockerClient.ContainerCreate( - ctx, - &container.Config{ - Image: imageRef, - - Entrypoint: []string{}, - Cmd: cmd, - - Hostname: tn.HostName(), - Labels: map[string]string{dockerutil.CleanupLabel: tn.TestName}, - - ExposedPorts: sentryPorts, - }, - &container.HostConfig{ - Binds: tn.Bind(), - PortBindings: pb, - PublishAllPorts: true, - AutoRemove: false, - DNS: []string{}, - }, - &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - tn.NetworkID: {}, - }, - }, - nil, - tn.Name(), - ) - if err != nil { - tn.preStartListeners.CloseAll() - return err - } - tn.containerID = cc.ID - return nil + return tn.containerLifecycle.CreateContainer(ctx, tn.TestName, tn.NetworkID, tn.Image, sentryPorts, tn.Bind(), tn.HostName(), cmd) } func (tn *ChainNode) StartContainer(ctx context.Context) error { - dockerutil.LockPortAssignment() - tn.preStartListeners.CloseAll() - err := dockerutil.StartContainer(ctx, tn.DockerClient, tn.containerID) - dockerutil.UnlockPortAssignment() - if err != nil { + if err := tn.containerLifecycle.StartContainer(ctx); err != nil { return err } - c, err := tn.DockerClient.ContainerInspect(ctx, tn.containerID) + // Set the host ports once since they will not change after the container has started. + hostPorts, err := tn.containerLifecycle.GetHostPorts(ctx, rpcPort, grpcPort) if err != nil { return err } - - // Set the host ports once since they will not change after the container has started. 
- tn.hostRPCPort = dockerutil.GetHostPort(c, rpcPort) - tn.hostGRPCPort = dockerutil.GetHostPort(c, grpcPort) - - tn.logger().Info("Cosmos chain node started", zap.String("container", tn.Name()), zap.String("rpc_port", tn.hostRPCPort)) + tn.hostRPCPort, tn.hostGRPCPort = hostPorts[0], hostPorts[1] err = tn.NewClient("tcp://" + tn.hostRPCPort) if err != nil { @@ -1022,19 +984,11 @@ func (tn *ChainNode) StartContainer(ctx context.Context) error { } func (tn *ChainNode) StopContainer(ctx context.Context) error { - timeout := 30 * time.Second - return tn.DockerClient.ContainerStop(ctx, tn.containerID, &timeout) + return tn.containerLifecycle.StopContainer(ctx) } func (tn *ChainNode) RemoveContainer(ctx context.Context) error { - err := tn.DockerClient.ContainerRemove(ctx, tn.containerID, dockertypes.ContainerRemoveOptions{ - Force: true, - RemoveVolumes: true, - }) - if err != nil && !errdefs.IsNotFound(err) { - return fmt.Errorf("remove container %s: %w", tn.Name(), err) - } - return nil + return tn.containerLifecycle.RemoveContainer(ctx) } // InitValidatorFiles creates the node files and signs a genesis transaction diff --git a/chain/cosmos/cosmos_chain.go b/chain/cosmos/cosmos_chain.go index c820dc2b2..a055e0524 100644 --- a/chain/cosmos/cosmos_chain.go +++ b/chain/cosmos/cosmos_chain.go @@ -547,20 +547,11 @@ func (c *CosmosChain) NewChainNode( networkID string, image ibc.DockerImage, validator bool, + index int, ) (*ChainNode, error) { // Construct the ChainNode first so we can access its name. // The ChainNode's VolumeName cannot be set until after we create the volume. 
- tn := &ChainNode{ - log: c.log, - - Validator: validator, - - Chain: c, - DockerClient: cli, - NetworkID: networkID, - TestName: testName, - Image: image, - } + tn := NewChainNode(c.log, validator, c, cli, networkID, testName, image, index) v, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ Labels: map[string]string{ @@ -609,11 +600,10 @@ func (c *CosmosChain) initializeChainNodes( for i := len(c.Validators); i < c.numValidators; i++ { i := i eg.Go(func() error { - val, err := c.NewChainNode(egCtx, testName, cli, networkID, image, true) + val, err := c.NewChainNode(egCtx, testName, cli, networkID, image, true, i) if err != nil { return err } - val.Index = i newVals[i] = val return nil }) @@ -621,11 +611,10 @@ func (c *CosmosChain) initializeChainNodes( for i := len(c.FullNodes); i < c.numFullNodes; i++ { i := i eg.Go(func() error { - fn, err := c.NewChainNode(egCtx, testName, cli, networkID, image, false) + fn, err := c.NewChainNode(egCtx, testName, cli, networkID, image, false, i) if err != nil { return err } - fn.Index = i newFullNodes[i] = fn return nil }) diff --git a/chain/internal/tendermint/tendermint_node.go b/chain/internal/tendermint/tendermint_node.go index de9ec26be..fe42b4f7f 100644 --- a/chain/internal/tendermint/tendermint_node.go +++ b/chain/internal/tendermint/tendermint_node.go @@ -14,8 +14,6 @@ import ( rpcclient "github.com/cometbft/cometbft/rpc/client" rpchttp "github.com/cometbft/cometbft/rpc/client/http" libclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" dockerclient "github.com/docker/docker/client" "github.com/docker/go-connections/nat" "github.com/hashicorp/go-version" @@ -38,9 +36,16 @@ type TendermintNode struct { TestName string Image ibc.DockerImage - containerID string + containerLifecycle *dockerutil.ContainerLifecycle +} + +func NewTendermintNode(log *zap.Logger, i int, c ibc.Chain, dockerClient *dockerclient.Client, 
networkID string, testName string, image ibc.DockerImage) *TendermintNode { + tn := &TendermintNode{Log: log, Index: i, Chain: c, + DockerClient: dockerClient, NetworkID: networkID, TestName: testName, Image: image} - preStartListeners dockerutil.Listeners + tn.containerLifecycle = dockerutil.NewContainerLifecycle(log, dockerClient, tn.Name()) + + return tn } // TendermintNodes is a collection of TendermintNode @@ -220,75 +225,26 @@ func (tn *TendermintNode) CreateNodeContainer(ctx context.Context, additionalFla chainCfg := tn.Chain.Config() cmd := []string{chainCfg.Bin, "start", "--home", tn.HomeDir()} cmd = append(cmd, additionalFlags...) - fmt.Printf("{%s} -> '%s'\n", tn.Name(), strings.Join(cmd, " ")) - - pb, listeners, err := dockerutil.GeneratePortBindings(sentryPorts) - if err != nil { - return fmt.Errorf("failed to generate port bindings: %w", err) - } - tn.preStartListeners = listeners - - cc, err := tn.DockerClient.ContainerCreate( - ctx, - &container.Config{ - Image: tn.Image.Ref(), - - Entrypoint: []string{}, - Cmd: cmd, - - Hostname: tn.HostName(), - - Labels: map[string]string{dockerutil.CleanupLabel: tn.TestName}, - - ExposedPorts: sentryPorts, - }, - &container.HostConfig{ - Binds: tn.Bind(), - PortBindings: pb, - PublishAllPorts: true, - AutoRemove: false, - DNS: []string{}, - }, - &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - tn.NetworkID: {}, - }, - }, - nil, - tn.Name(), - ) - if err != nil { - tn.preStartListeners.CloseAll() - return err - } - tn.containerID = cc.ID - return nil + return tn.containerLifecycle.CreateContainer(ctx, tn.TestName, tn.NetworkID, tn.Image, sentryPorts, tn.Bind(), tn.HostName(), cmd) } func (tn *TendermintNode) StopContainer(ctx context.Context) error { - timeout := 30 * time.Second - return tn.DockerClient.ContainerStop(ctx, tn.containerID, &timeout) + return tn.containerLifecycle.StopContainer(ctx) } func (tn *TendermintNode) StartContainer(ctx context.Context) error { - 
dockerutil.LockPortAssignment() - tn.preStartListeners.CloseAll() - err := dockerutil.StartContainer(ctx, tn.DockerClient, tn.containerID) - dockerutil.UnlockPortAssignment() - if err != nil { + if err := tn.containerLifecycle.StartContainer(ctx); err != nil { return err } - c, err := tn.DockerClient.ContainerInspect(ctx, tn.containerID) + hostPorts, err := tn.containerLifecycle.GetHostPorts(ctx, rpcPort) if err != nil { return err } + rpcPort := hostPorts[0] - port := dockerutil.GetHostPort(c, rpcPort) - fmt.Printf("{%s} RPC => %s\n", tn.Name(), port) - - err = tn.NewClient(fmt.Sprintf("tcp://%s", port)) + err = tn.NewClient(fmt.Sprintf("tcp://%s", rpcPort)) if err != nil { return err } diff --git a/chain/penumbra/penumbra_app_node.go b/chain/penumbra/penumbra_app_node.go index e304902e8..3f466d5bb 100644 --- a/chain/penumbra/penumbra_app_node.go +++ b/chain/penumbra/penumbra_app_node.go @@ -7,10 +7,7 @@ import ( "fmt" "path/filepath" "strings" - "time" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" "github.com/strangelove-ventures/interchaintest/v7/ibc" @@ -29,7 +26,7 @@ type PenumbraAppNode struct { DockerClient *client.Client Image ibc.DockerImage - containerID string + containerLifecycle *dockerutil.ContainerLifecycle // Set during StartContainer. 
hostRPCPort string @@ -216,74 +213,25 @@ func (p *PenumbraAppNode) SendIBCTransfer( func (p *PenumbraAppNode) CreateNodeContainer(ctx context.Context) error { cmd := []string{"pd", "start", "--host", "0.0.0.0", "--home", p.HomeDir()} - fmt.Printf("{%s} -> '%s'\n", p.Name(), strings.Join(cmd, " ")) - pb, listeners, err := dockerutil.GeneratePortBindings(exposedPorts) - if err != nil { - return fmt.Errorf("failed to generate port bindings: %w", err) - } - - p.preStartListeners = listeners - - cc, err := p.DockerClient.ContainerCreate( - ctx, - &container.Config{ - Image: p.Image.Ref(), - - Entrypoint: []string{}, - Cmd: cmd, - - Hostname: p.HostName(), - User: p.Image.UidGid, - - Labels: map[string]string{dockerutil.CleanupLabel: p.TestName}, - - ExposedPorts: exposedPorts, - }, - &container.HostConfig{ - Binds: p.Bind(), - PortBindings: pb, - PublishAllPorts: true, - AutoRemove: false, - DNS: []string{}, - }, - &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - p.NetworkID: {}, - }, - }, - nil, - p.Name(), - ) - if err != nil { - p.preStartListeners.CloseAll() - return err - } - p.containerID = cc.ID - return nil + return p.containerLifecycle.CreateContainer(ctx, p.TestName, p.NetworkID, p.Image, exposedPorts, p.Bind(), p.HostName(), cmd) } func (p *PenumbraAppNode) StopContainer(ctx context.Context) error { - timeout := 30 * time.Second - return p.DockerClient.ContainerStop(ctx, p.containerID, &timeout) + return p.containerLifecycle.StopContainer(ctx) } func (p *PenumbraAppNode) StartContainer(ctx context.Context) error { - dockerutil.LockPortAssignment() - p.preStartListeners.CloseAll() - err := dockerutil.StartContainer(ctx, p.DockerClient, p.containerID) - dockerutil.UnlockPortAssignment() - if err != nil { + if err := p.containerLifecycle.StartContainer(ctx); err != nil { return err } - c, err := p.DockerClient.ContainerInspect(ctx, p.containerID) + hostPorts, err := p.containerLifecycle.GetHostPorts(ctx, rpcPort, grpcPort) if 
err != nil { return err } - p.hostRPCPort = dockerutil.GetHostPort(c, rpcPort) - p.hostGRPCPort = dockerutil.GetHostPort(c, grpcPort) + p.hostRPCPort, p.hostGRPCPort = hostPorts[0], hostPorts[1] return nil } diff --git a/chain/penumbra/penumbra_chain.go b/chain/penumbra/penumbra_chain.go index 92ec8a0e1..69b559071 100644 --- a/chain/penumbra/penumbra_chain.go +++ b/chain/penumbra/penumbra_chain.go @@ -18,6 +18,7 @@ import ( "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" + dockerclient "github.com/docker/docker/client" "github.com/strangelove-ventures/interchaintest/v7/chain/internal/tendermint" "github.com/strangelove-ventures/interchaintest/v7/ibc" "github.com/strangelove-ventures/interchaintest/v7/internal/dockerutil" @@ -246,6 +247,78 @@ func (c *PenumbraChain) GetGasFeesInNativeDenom(gasPaid int64) int64 { return int64(fees) } +// NewChainNode returns a penumbra chain node with tendermint and penumbra nodes +// with docker volumes created. 
+func (c *PenumbraChain) NewChainNode( + ctx context.Context, + i int, + dockerClient *dockerclient.Client, + networkID string, + testName string, + tendermintImage ibc.DockerImage, + penumbraImage ibc.DockerImage, +) (PenumbraNode, error) { + tn := tendermint.NewTendermintNode(c.log, i, c, dockerClient, networkID, testName, tendermintImage) + + tv, err := dockerClient.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Labels: map[string]string{ + dockerutil.CleanupLabel: testName, + + dockerutil.NodeOwnerLabel: tn.Name(), + }, + }) + if err != nil { + return PenumbraNode{}, fmt.Errorf("creating tendermint volume: %w", err) + } + tn.VolumeName = tv.Name + if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ + Log: c.log, + + Client: dockerClient, + + VolumeName: tn.VolumeName, + ImageRef: tn.Image.Ref(), + TestName: tn.TestName, + UidGid: tn.Image.UidGid, + }); err != nil { + return PenumbraNode{}, fmt.Errorf("set tendermint volume owner: %w", err) + } + + pn := &PenumbraAppNode{log: c.log, Index: i, Chain: c, + DockerClient: dockerClient, NetworkID: networkID, TestName: testName, Image: penumbraImage} + + pn.containerLifecycle = dockerutil.NewContainerLifecycle(c.log, dockerClient, pn.Name()) + + pv, err := dockerClient.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Labels: map[string]string{ + dockerutil.CleanupLabel: testName, + + dockerutil.NodeOwnerLabel: pn.Name(), + }, + }) + if err != nil { + return PenumbraNode{}, fmt.Errorf("creating penumbra volume: %w", err) + } + pn.VolumeName = pv.Name + if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ + Log: c.log, + + Client: dockerClient, + + VolumeName: pn.VolumeName, + ImageRef: pn.Image.Ref(), + TestName: pn.TestName, + UidGid: pn.Image.UidGid, + }); err != nil { + return PenumbraNode{}, fmt.Errorf("set penumbra volume owner: %w", err) + } + + return PenumbraNode{ + TendermintNode: tn, + PenumbraAppNode: pn, + }, nil +} + + // creates the test node objects required for
bootstrapping tests func (c *PenumbraChain) initializeChainNodes( ctx context.Context, @@ -274,60 +347,11 @@ func (c *PenumbraChain) initializeChainNodes( } } for i := 0; i < count; i++ { - tn := &tendermint.TendermintNode{Log: c.log, Index: i, Chain: c, - DockerClient: cli, NetworkID: networkID, TestName: testName, Image: chainCfg.Images[0]} - - tv, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ - Labels: map[string]string{ - dockerutil.CleanupLabel: testName, - - dockerutil.NodeOwnerLabel: tn.Name(), - }, - }) - if err != nil { - return fmt.Errorf("creating tendermint volume: %w", err) - } - tn.VolumeName = tv.Name - if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ - Log: c.log, - - Client: cli, - - VolumeName: tn.VolumeName, - ImageRef: tn.Image.Ref(), - TestName: tn.TestName, - UidGid: tn.Image.UidGid, - }); err != nil { - return fmt.Errorf("set tendermint volume owner: %w", err) - } - - pn := &PenumbraAppNode{log: c.log, Index: i, Chain: c, - DockerClient: cli, NetworkID: networkID, TestName: testName, Image: chainCfg.Images[1]} - pv, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ - Labels: map[string]string{ - dockerutil.CleanupLabel: testName, - - dockerutil.NodeOwnerLabel: pn.Name(), - }, - }) + pn, err := c.NewChainNode(ctx, i, cli, networkID, testName, chainCfg.Images[0], chainCfg.Images[1]) if err != nil { - return fmt.Errorf("creating penumbra volume: %w", err) - } - pn.VolumeName = pv.Name - if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ - Log: c.log, - - Client: cli, - - VolumeName: pn.VolumeName, - ImageRef: pn.Image.Ref(), - TestName: pn.TestName, - UidGid: tn.Image.UidGid, - }); err != nil { - return fmt.Errorf("set penumbra volume owner: %w", err) + return err } - - penumbraNodes = append(penumbraNodes, PenumbraNode{TendermintNode: tn, PenumbraAppNode: pn}) + penumbraNodes = append(penumbraNodes, pn) } c.PenumbraNodes = penumbraNodes diff --git 
a/chain/polkadot/parachain_node.go b/chain/polkadot/parachain_node.go index 6584b1727..11a36d049 100644 --- a/chain/polkadot/parachain_node.go +++ b/chain/polkadot/parachain_node.go @@ -7,12 +7,9 @@ import ( "fmt" "path/filepath" "strings" - "time" "github.com/avast/retry-go/v4" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/icza/dyno" p2pcrypto "github.com/libp2p/go-libp2p-core/crypto" @@ -28,11 +25,11 @@ type ParachainNode struct { TestName string Index int - NetworkID string - containerID string - VolumeName string - DockerClient *client.Client - Image ibc.DockerImage + NetworkID string + containerLifecycle *dockerutil.ContainerLifecycle + VolumeName string + DockerClient *client.Client + Image ibc.DockerImage Chain ibc.Chain Bin string @@ -248,81 +245,28 @@ func (pn *ParachainNode) CreateNodeContainer(ctx context.Context) error { cmd = append(cmd, pn.Flags...) cmd = append(cmd, "--", fmt.Sprintf("--chain=%s", pn.RawRelayChainSpecFilePathFull())) cmd = append(cmd, pn.RelayChainFlags...) - pn.logger(). 
- Info("Running command", - zap.String("command", strings.Join(cmd, " ")), - zap.String("container", pn.Name()), - ) - pb, listeners, err := dockerutil.GeneratePortBindings(exposedPorts) - if err != nil { - return fmt.Errorf("failed to generate port bindings: %w", err) - } - - pn.preStartListeners = listeners - - cc, err := pn.DockerClient.ContainerCreate( - ctx, - &container.Config{ - Image: pn.Image.Ref(), - - Entrypoint: []string{}, - Cmd: cmd, - - Hostname: pn.HostName(), - User: pn.Image.UidGid, - - Labels: map[string]string{dockerutil.CleanupLabel: pn.TestName}, - - ExposedPorts: exposedPorts, - }, - &container.HostConfig{ - Binds: pn.Bind(), - PortBindings: pb, - PublishAllPorts: true, - AutoRemove: false, - DNS: []string{}, - }, - &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - pn.NetworkID: {}, - }, - }, - nil, - pn.Name(), - ) - if err != nil { - pn.preStartListeners.CloseAll() - return err - } - pn.containerID = cc.ID - return nil + return pn.containerLifecycle.CreateContainer(ctx, pn.TestName, pn.NetworkID, pn.Image, exposedPorts, pn.Bind(), pn.HostName(), cmd) } // StopContainer stops the relay chain node container, waiting at most 30 seconds. func (pn *ParachainNode) StopContainer(ctx context.Context) error { - timeout := 30 * time.Second - return pn.DockerClient.ContainerStop(ctx, pn.containerID, &timeout) + return pn.containerLifecycle.StopContainer(ctx) } // StartContainer starts the container after it is built by CreateNodeContainer. 
func (pn *ParachainNode) StartContainer(ctx context.Context) error { - dockerutil.LockPortAssignment() - pn.preStartListeners.CloseAll() - err := dockerutil.StartContainer(ctx, pn.DockerClient, pn.containerID) - dockerutil.UnlockPortAssignment() - if err != nil { + if err := pn.containerLifecycle.StartContainer(ctx); err != nil { return err } - c, err := pn.DockerClient.ContainerInspect(ctx, pn.containerID) + hostPorts, err := pn.containerLifecycle.GetHostPorts(ctx, wsPort, rpcPort) if err != nil { return err } // Set the host ports once since they will not change after the container has started. - pn.hostWsPort = dockerutil.GetHostPort(c, wsPort) - pn.hostRpcPort = dockerutil.GetHostPort(c, rpcPort) + pn.hostWsPort, pn.hostRpcPort = hostPorts[0], hostPorts[1] explorerUrl := fmt.Sprintf("\033[4;34mhttps://polkadot.js.org/apps?rpc=ws://%s#/explorer\033[0m", strings.Replace(pn.hostWsPort, "localhost", "127.0.0.1", 1)) diff --git a/chain/polkadot/polkadot_chain.go b/chain/polkadot/polkadot_chain.go index 1e384e2bf..2239ec98d 100644 --- a/chain/polkadot/polkadot_chain.go +++ b/chain/polkadot/polkadot_chain.go @@ -2,11 +2,11 @@ package polkadot import ( "context" + "crypto/rand" crand "crypto/rand" "encoding/json" "fmt" "io" - "math/rand" "strings" "github.com/99designs/keyring" @@ -17,6 +17,7 @@ import ( "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" "github.com/docker/docker/client" + dockerclient "github.com/docker/docker/client" "github.com/icza/dyno" p2pcrypto "github.com/libp2p/go-libp2p-core/crypto" "github.com/strangelove-ventures/interchaintest/v7/ibc" @@ -88,6 +89,148 @@ func (c *PolkadotChain) Config() ibc.ChainConfig { return c.cfg } +func (c *PolkadotChain) NewRelayChainNode( + ctx context.Context, + i int, + chain *PolkadotChain, + dockerClient *dockerclient.Client, + networkID string, + testName string, + image ibc.DockerImage, +) (*RelayChainNode, error) { + seed := make([]byte, 32) + if _, err := 
rand.Read(seed); err != nil { + return nil, err + } + + nodeKey, _, err := p2pcrypto.GenerateEd25519Key(crand.Reader) + if err != nil { + return nil, fmt.Errorf("error generating node key: %w", err) + } + + nameCased := namecase.New().NameCase(IndexedName[i]) + + ed25519PrivKey, err := DeriveEd25519FromName(nameCased) + if err != nil { + return nil, err + } + + accountKeyName := IndexedName[i] + accountKeyUri := IndexedUri[i] + stashKeyName := accountKeyName + "stash" + stashKeyUri := accountKeyUri + "//stash" + + if err := c.RecoverKey(ctx, accountKeyName, accountKeyUri); err != nil { + return nil, err + } + + if err := c.RecoverKey(ctx, stashKeyName, stashKeyUri); err != nil { + return nil, err + } + + ecdsaPrivKey, err := DeriveSecp256k1FromName(nameCased) + if err != nil { + return nil, fmt.Errorf("error generating secp256k1 private key: %w", err) + } + + pn := &RelayChainNode{ + log: c.log, + Index: i, + Chain: c, + DockerClient: dockerClient, + NetworkID: networkID, + TestName: testName, + Image: image, + NodeKey: nodeKey, + Ed25519PrivateKey: ed25519PrivKey, + AccountKeyName: accountKeyName, + StashKeyName: stashKeyName, + EcdsaPrivateKey: *ecdsaPrivKey, + } + + pn.containerLifecycle = dockerutil.NewContainerLifecycle(c.log, dockerClient, pn.Name()) + + v, err := dockerClient.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Labels: map[string]string{ + dockerutil.CleanupLabel: testName, + + dockerutil.NodeOwnerLabel: pn.Name(), + }, + }) + if err != nil { + return nil, fmt.Errorf("creating volume for chain node: %w", err) + } + pn.VolumeName = v.Name + + if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ + Log: c.log, + Client: dockerClient, + VolumeName: v.Name, + ImageRef: image.Ref(), + TestName: testName, + UidGid: image.UidGid, + }); err != nil { + return nil, fmt.Errorf("set volume owner: %w", err) + } + + return pn, nil +} + +func (c *PolkadotChain) NewParachainNode( + ctx context.Context, + i int, + dockerClient 
*dockerclient.Client, + networkID string, + testName string, + parachainConfig ParachainConfig, +) (*ParachainNode, error) { + nodeKey, _, err := p2pcrypto.GenerateEd25519Key(crand.Reader) + if err != nil { + return nil, fmt.Errorf("error generating node key: %w", err) + } + pn := &ParachainNode{ + log: c.log, + Index: i, + Chain: c, + DockerClient: dockerClient, + NetworkID: networkID, + TestName: testName, + NodeKey: nodeKey, + Image: parachainConfig.Image, + Bin: parachainConfig.Bin, + ChainID: parachainConfig.ChainID, + Flags: parachainConfig.Flags, + RelayChainFlags: parachainConfig.RelayChainFlags, + } + + pn.containerLifecycle = dockerutil.NewContainerLifecycle(c.log, dockerClient, pn.Name()) + + v, err := dockerClient.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ + Labels: map[string]string{ + dockerutil.CleanupLabel: testName, + + dockerutil.NodeOwnerLabel: pn.Name(), + }, + }) + if err != nil { + return nil, fmt.Errorf("creating volume for chain node: %w", err) + } + pn.VolumeName = v.Name + + if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ + Log: c.log, + Client: dockerClient, + VolumeName: v.Name, + ImageRef: parachainConfig.Image.Ref(), + TestName: testName, + UidGid: parachainConfig.Image.UidGid, + }); err != nil { + return nil, fmt.Errorf("set volume owner: %w", err) + } + + return pn, nil +} + // Initialize initializes node structs so that things like initializing keys can be done before starting the chain. // Implements Chain interface. 
func (c *PolkadotChain) Initialize(ctx context.Context, testName string, cli *client.Client, networkID string) error { @@ -116,121 +259,19 @@ func (c *PolkadotChain) Initialize(ctx context.Context, testName string, cli *cl } } for i := 0; i < c.numRelayChainNodes; i++ { - seed := make([]byte, 32) - rand.Read(seed) - - nodeKey, _, err := p2pcrypto.GenerateEd25519Key(crand.Reader) - if err != nil { - return fmt.Errorf("error generating node key: %w", err) - } - - nameCased := namecase.New().NameCase(IndexedName[i]) - - ed25519PrivKey, err := DeriveEd25519FromName(nameCased) - if err != nil { - return err - } - - accountKeyName := IndexedName[i] - accountKeyUri := IndexedUri[i] - stashKeyName := accountKeyName + "stash" - stashKeyUri := accountKeyUri + "//stash" - err = c.RecoverKey(ctx, accountKeyName, accountKeyUri) - if err != nil { - return err - } - err = c.RecoverKey(ctx, stashKeyName, stashKeyUri) + pn, err := c.NewRelayChainNode(ctx, i, c, cli, networkID, testName, chainCfg.Images[0]) if err != nil { return err } - - ecdsaPrivKey, err := DeriveSecp256k1FromName(nameCased) - if err != nil { - return fmt.Errorf("error generating secp256k1 private key: %w", err) - } - pn := &RelayChainNode{ - log: c.log, - Index: i, - Chain: c, - DockerClient: cli, - NetworkID: networkID, - TestName: testName, - Image: chainCfg.Images[0], - NodeKey: nodeKey, - Ed25519PrivateKey: ed25519PrivKey, - AccountKeyName: accountKeyName, - StashKeyName: stashKeyName, - EcdsaPrivateKey: *ecdsaPrivKey, - } - - v, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ - Labels: map[string]string{ - dockerutil.CleanupLabel: testName, - - dockerutil.NodeOwnerLabel: pn.Name(), - }, - }) - if err != nil { - return fmt.Errorf("creating volume for chain node: %w", err) - } - pn.VolumeName = v.Name - - if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ - Log: c.log, - Client: cli, - VolumeName: v.Name, - ImageRef: chainCfg.Images[0].Ref(), - TestName: testName, - UidGid: 
chainCfg.Images[0].UidGid, - }); err != nil { - return fmt.Errorf("set volume owner: %w", err) - } - relayChainNodes = append(relayChainNodes, pn) } c.RelayChainNodes = relayChainNodes - for _, parachainConfig := range c.parachainConfig { + for _, pc := range c.parachainConfig { parachainNodes := []*ParachainNode{} - for i := 0; i < parachainConfig.NumNodes; i++ { - nodeKey, _, err := p2pcrypto.GenerateEd25519Key(crand.Reader) + for i := 0; i < pc.NumNodes; i++ { + pn, err := c.NewParachainNode(ctx, i, cli, networkID, testName, pc) if err != nil { - return fmt.Errorf("error generating node key: %w", err) - } - pn := &ParachainNode{ - log: c.log, - Index: i, - Chain: c, - DockerClient: cli, - NetworkID: networkID, - TestName: testName, - NodeKey: nodeKey, - Image: parachainConfig.Image, - Bin: parachainConfig.Bin, - ChainID: parachainConfig.ChainID, - Flags: parachainConfig.Flags, - RelayChainFlags: parachainConfig.RelayChainFlags, - } - v, err := cli.VolumeCreate(ctx, volumetypes.VolumeCreateBody{ - Labels: map[string]string{ - dockerutil.CleanupLabel: testName, - - dockerutil.NodeOwnerLabel: pn.Name(), - }, - }) - if err != nil { - return fmt.Errorf("creating volume for chain node: %w", err) - } - pn.VolumeName = v.Name - - if err := dockerutil.SetVolumeOwner(ctx, dockerutil.VolumeOwnerOptions{ - Log: c.log, - Client: cli, - VolumeName: v.Name, - ImageRef: parachainConfig.Image.Ref(), - TestName: testName, - UidGid: parachainConfig.Image.UidGid, - }); err != nil { - return fmt.Errorf("set volume owner: %w", err) + return err } parachainNodes = append(parachainNodes, pn) } diff --git a/chain/polkadot/relay_chain_node.go b/chain/polkadot/relay_chain_node.go index 36ad3ee7a..04aa5dc37 100644 --- a/chain/polkadot/relay_chain_node.go +++ b/chain/polkadot/relay_chain_node.go @@ -10,8 +10,6 @@ import ( "github.com/avast/retry-go/v4" gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" - "github.com/docker/docker/api/types/container" - 
"github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" @@ -30,11 +28,11 @@ type RelayChainNode struct { TestName string Index int - NetworkID string - containerID string - VolumeName string - DockerClient *client.Client - Image ibc.DockerImage + NetworkID string + containerLifecycle *dockerutil.ContainerLifecycle + VolumeName string + DockerClient *client.Client + Image ibc.DockerImage Chain ibc.Chain NodeKey p2pCrypto.PrivKey @@ -224,81 +222,27 @@ func (p *RelayChainNode) CreateNodeContainer(ctx context.Context) error { fmt.Sprintf("--public-addr=%s", multiAddress), "--base-path", p.NodeHome(), } - p.logger(). - Info("Running command", - zap.String("command", strings.Join(cmd, " ")), - zap.String("container", p.Name()), - ) - - pb, listeners, err := dockerutil.GeneratePortBindings(exposedPorts) - if err != nil { - return fmt.Errorf("failed to generate port bindings: %w", err) - } - - p.preStartListeners = listeners - - cc, err := p.DockerClient.ContainerCreate( - ctx, - &container.Config{ - Image: p.Image.Ref(), - - Entrypoint: []string{}, - Cmd: cmd, - - Hostname: p.HostName(), - User: p.Image.UidGid, - - Labels: map[string]string{dockerutil.CleanupLabel: p.TestName}, - - ExposedPorts: exposedPorts, - }, - &container.HostConfig{ - Binds: p.Bind(), - PortBindings: pb, - PublishAllPorts: true, - AutoRemove: false, - DNS: []string{}, - }, - &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - p.NetworkID: {}, - }, - }, - nil, - p.Name(), - ) - if err != nil { - p.preStartListeners.CloseAll() - return err - } - p.containerID = cc.ID - return nil + return p.containerLifecycle.CreateContainer(ctx, p.TestName, p.NetworkID, p.Image, exposedPorts, p.Bind(), p.HostName(), cmd) } // StopContainer stops the relay chain node container, waiting at most 30 seconds. 
func (p *RelayChainNode) StopContainer(ctx context.Context) error { - timeout := 30 * time.Second - return p.DockerClient.ContainerStop(ctx, p.containerID, &timeout) + return p.containerLifecycle.StopContainer(ctx) } // StartContainer starts the container after it is built by CreateNodeContainer. func (p *RelayChainNode) StartContainer(ctx context.Context) error { - dockerutil.LockPortAssignment() - p.preStartListeners.CloseAll() - err := dockerutil.StartContainer(ctx, p.DockerClient, p.containerID) - dockerutil.UnlockPortAssignment() - if err != nil { + if err := p.containerLifecycle.StartContainer(ctx); err != nil { return err } - c, err := p.DockerClient.ContainerInspect(ctx, p.containerID) + hostPorts, err := p.containerLifecycle.GetHostPorts(ctx, wsPort, rpcPort) if err != nil { return err } // Set the host ports once since they will not change after the container has started. - p.hostWsPort = dockerutil.GetHostPort(c, wsPort) - p.hostRpcPort = dockerutil.GetHostPort(c, rpcPort) + p.hostWsPort, p.hostRpcPort = hostPorts[0], hostPorts[1] p.logger().Info("Waiting for RPC endpoint to be available", zap.String("container", p.Name())) explorerUrl := fmt.Sprintf("\033[4;34mhttps://polkadot.js.org/apps?rpc=ws://%s#/explorer\033[0m", diff --git a/internal/dockerutil/container_lifecycle.go b/internal/dockerutil/container_lifecycle.go new file mode 100644 index 000000000..d16e90d45 --- /dev/null +++ b/internal/dockerutil/container_lifecycle.go @@ -0,0 +1,147 @@ +package dockerutil + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + dockertypes "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + dockerclient "github.com/docker/docker/client" + "github.com/docker/docker/errdefs" + "github.com/docker/go-connections/nat" + "github.com/strangelove-ventures/interchaintest/v7/ibc" + "go.uber.org/zap" +) + +type ContainerLifecycle struct { + log *zap.Logger + client 
*dockerclient.Client + containerName string + id string + preStartListeners Listeners +} + +func NewContainerLifecycle(log *zap.Logger, client *dockerclient.Client, containerName string) *ContainerLifecycle { + return &ContainerLifecycle{ + log: log, + client: client, + containerName: containerName, + } +} + +func (c *ContainerLifecycle) CreateContainer( + ctx context.Context, + testName string, + networkID string, + image ibc.DockerImage, + ports nat.PortSet, + volumeBinds []string, + hostName string, + cmd []string, +) error { + imageRef := image.Ref() + c.log.Info( + "Will run command", + zap.String("image", imageRef), + zap.String("container", c.containerName), + zap.String("command", strings.Join(cmd, " ")), + ) + + pb, listeners, err := GeneratePortBindings(ports) + if err != nil { + return fmt.Errorf("failed to generate port bindings: %w", err) + } + + c.preStartListeners = listeners + + cc, err := c.client.ContainerCreate( + ctx, + &container.Config{ + Image: imageRef, + + Entrypoint: []string{}, + Cmd: cmd, + + Hostname: hostName, + + Labels: map[string]string{CleanupLabel: testName}, + + ExposedPorts: ports, + }, + &container.HostConfig{ + Binds: volumeBinds, + PortBindings: pb, + PublishAllPorts: true, + AutoRemove: false, + DNS: []string{}, + }, + &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + networkID: {}, + }, + }, + nil, + c.containerName, + ) + if err != nil { + listeners.CloseAll() + c.preStartListeners = []net.Listener{} + return err + } + c.id = cc.ID + return nil +} + +func (c *ContainerLifecycle) StartContainer(ctx context.Context) error { + // lock port allocation for the time between freeing the ports from the + // temporary listeners to the consumption of the ports by the container + mu.RLock() + defer mu.RUnlock() + + c.preStartListeners.CloseAll() + c.preStartListeners = []net.Listener{} + + if err := StartContainer(ctx, c.client, c.id); err != nil { + return err + } + + c.log.Info("Container 
started", zap.String("container", c.containerName)) + + return nil +} + +func (c *ContainerLifecycle) StopContainer(ctx context.Context) error { + timeout := 30 * time.Second + return c.client.ContainerStop(ctx, c.id, &timeout) +} + +func (c *ContainerLifecycle) RemoveContainer(ctx context.Context) error { + err := c.client.ContainerRemove(ctx, c.id, dockertypes.ContainerRemoveOptions{ + Force: true, + RemoveVolumes: true, + }) + if err != nil && !errdefs.IsNotFound(err) { + return fmt.Errorf("remove container %s: %w", c.containerName, err) + } + return nil +} + +func (c *ContainerLifecycle) ContainerID() string { + return c.id +} + +func (c *ContainerLifecycle) GetHostPorts(ctx context.Context, portIDs ...string) ([]string, error) { + cjson, err := c.client.ContainerInspect(ctx, c.id) + if err != nil { + return nil, err + } + ports := make([]string, len(portIDs)) + for i, p := range portIDs { + ports[i] = GetHostPort(cjson, p) + } + return ports, nil +} diff --git a/internal/dockerutil/ports.go b/internal/dockerutil/ports.go index 12c6de0a0..a4f12db1a 100644 --- a/internal/dockerutil/ports.go +++ b/internal/dockerutil/ports.go @@ -8,7 +8,7 @@ import ( "github.com/docker/go-connections/nat" ) -var mu sync.Mutex +var mu sync.RWMutex type Listeners []net.Listener @@ -18,14 +18,6 @@ func (l Listeners) CloseAll() { } } -func LockPortAssignment() { - mu.Lock() -} - -func UnlockPortAssignment() { - mu.Unlock() -} - // openListenerOnFreePort opens the next free port func openListenerOnFreePort() (*net.TCPListener, error) { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") @@ -33,8 +25,8 @@ func openListenerOnFreePort() (*net.TCPListener, error) { return nil, err } - LockPortAssignment() - defer UnlockPortAssignment() + mu.Lock() + defer mu.Unlock() l, err := net.ListenTCP("tcp", addr) if err != nil { return nil, err @@ -62,7 +54,7 @@ func nextAvailablePort() (nat.PortBinding, *net.TCPListener, error) { // GeneratePortBindings will find open ports on the local // 
machine and create a PortBinding for every port in the portSet. -func GeneratePortBindings(portSet nat.PortSet) (nat.PortMap, []net.Listener, error) { +func GeneratePortBindings(portSet nat.PortSet) (nat.PortMap, Listeners, error) { m := make(nat.PortMap) listeners := make(Listeners, 0, len(portSet)) diff --git a/relayer/rly/cosmos_relayer.go b/relayer/rly/cosmos_relayer.go index 554a29493..d00920965 100644 --- a/relayer/rly/cosmos_relayer.go +++ b/relayer/rly/cosmos_relayer.go @@ -66,7 +66,7 @@ type CosmosRelayerChainConfig struct { const ( DefaultContainerImage = "ghcr.io/cosmos/relayer" - DefaultContainerVersion = "v2.3.0-rc1" + DefaultContainerVersion = "andrew-fix_ordered_channel_closure" ) // Capabilities returns the set of capabilities of the Cosmos relayer.