diff --git a/docker_auth_test.go b/docker_auth_test.go
index d494d6d12e..b8580a08ea 100644
--- a/docker_auth_test.go
+++ b/docker_auth_test.go
@@ -286,7 +286,7 @@ func prepareLocalRegistryWithAuth(t *testing.T) string {
ContainerFilePath: "/data",
},
},
- WaitingFor: wait.ForExposedPort(),
+ WaitingFor: wait.ForHTTP("/").WithPort("5000/tcp"),
}
// }
@@ -311,9 +311,6 @@ func prepareLocalRegistryWithAuth(t *testing.T) string {
removeImageFromLocalCache(t, addr+"/redis:5.0-alpine")
})
- _, cancel := context.WithCancel(context.Background())
- t.Cleanup(cancel)
-
return addr
}
diff --git a/docker_test.go b/docker_test.go
index fdc3196dae..b0a78b346d 100644
--- a/docker_test.go
+++ b/docker_test.go
@@ -658,14 +658,12 @@ func TestContainerCreationTimesOutWithHttp(t *testing.T) {
ExposedPorts: []string{
nginxDefaultPort,
},
- WaitingFor: wait.ForHTTP("/").WithStartupTimeout(1 * time.Second),
+ WaitingFor: wait.ForHTTP("/").WithStartupTimeout(time.Millisecond * 500),
},
Started: true,
})
CleanupContainer(t, nginxC)
- if err == nil {
- t.Error("Expected timeout")
- }
+ require.Error(t, err, "expected timeout")
}
func TestContainerCreationWaitsForLogContextTimeout(t *testing.T) {
diff --git a/docs/modules/redpanda.md b/docs/modules/redpanda.md
index f923b8be09..028dbaf95f 100644
--- a/docs/modules/redpanda.md
+++ b/docs/modules/redpanda.md
@@ -61,6 +61,8 @@ If you need to enable TLS use `WithTLS` with a valid PEM encoded certificate and
#### Additional Listener
+- Since testcontainers-go :material-tag: v0.28.0
+
There are scenarios where additional listeners are needed, for example if you
want to consume from, or produce to, another container in the same network
@@ -79,12 +81,77 @@ Produce messages using the new registered listener
[Produce/consume via registered listener](../../modules/redpanda/redpanda_test.go) inside_block:withListenerExec
+#### Adding Service Accounts
+
+- Since testcontainers-go :material-tag: v0.20.0
+
+It's possible to add service accounts to the Redpanda container using the `WithNewServiceAccount` option, passing the service account name and its password.
+E.g. `WithNewServiceAccount("service-account", "password")`.
+
+#### Adding Super Users
+
+- Since testcontainers-go :material-tag: v0.20.0
+
+When a super user is needed, you can use the `WithSuperusers` option, passing a variadic list of super users.
+E.g. `WithSuperusers("superuser-1", "superuser-2")`.
+
+#### Enabling SASL
+
+- Since testcontainers-go :material-tag: v0.20.0
+
+The `WithEnableSASL()` option enables SASL/SCRAM authentication. By default, no authentication (plaintext) is used.
+When setting an authentication method, make sure to also add users and authorize them using the `WithSuperusers()` option.
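+
+Putting these together, here is a minimal, self-contained sketch (the image tag is only an example, and the surrounding `main` scaffolding is illustrative rather than taken from the module's tests):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/testcontainers/testcontainers-go"
+	"github.com/testcontainers/testcontainers-go/modules/redpanda"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Enable SASL and register a superuser account to authenticate with.
+	ctr, err := redpanda.Run(ctx,
+		"docker.redpanda.com/redpandadata/redpanda:v23.3.3",
+		redpanda.WithEnableSASL(),
+		redpanda.WithNewServiceAccount("superuser-1", "test"),
+		redpanda.WithSuperusers("superuser-1"),
+	)
+	defer func() {
+		if err := testcontainers.TerminateContainer(ctr); err != nil {
+			log.Printf("failed to terminate container: %s", err)
+		}
+	}()
+	if err != nil {
+		log.Printf("failed to start container: %s", err)
+		return
+	}
+
+	// The returned broker address ("host:port") is what a SASL-enabled
+	// Kafka client should use to bootstrap its connection.
+	seedBroker, err := ctr.KafkaSeedBroker(ctx)
+	if err != nil {
+		log.Printf("failed to get seed broker: %s", err)
+		return
+	}
+	log.Println("seed broker:", seedBroker)
+}
+```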
+
+#### WithEnableKafkaAuthorization
+
+- Since testcontainers-go :material-tag: v0.20.0
+
+The `WithEnableKafkaAuthorization` option enables authorization for connections on the Kafka API.
+
+#### WithEnableWasmTransform
+
+- Since testcontainers-go :material-tag: v0.28.0
+
+The `WithEnableWasmTransform` option enables WebAssembly (Wasm) transforms.
+
+!!!warning
+ This option should not be used with Redpanda versions earlier than 23.3.
+
+#### WithEnableSchemaRegistryHTTPBasicAuth
+
+- Since testcontainers-go :material-tag: v0.20.0
+
+The `WithEnableSchemaRegistryHTTPBasicAuth` option enables HTTP basic authentication for the Schema Registry.
+
+#### WithAutoCreateTopics
+
+- Since testcontainers-go :material-tag: v0.22.0
+
+The `WithAutoCreateTopics` option enables the auto-creation of topics.
+
+#### WithTLS
+
+- Since testcontainers-go :material-tag: v0.24.0
+
+The `WithTLS` option enables TLS encryption. It requires a valid PEM encoded certificate and key, passed as byte slices.
+E.g. `WithTLS([]byte(cert), []byte(key))`.
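+
+As a sketch, the certificate and key could be read from disk and passed in as byte slices (the paths and helper function below are placeholders, not part of the module; imports omitted for brevity):
+
+```go
+// runWithTLS reads a PEM-encoded certificate/key pair from placeholder
+// paths and starts Redpanda with TLS enabled.
+func runWithTLS(ctx context.Context) (*redpanda.Container, error) {
+	cert, err := os.ReadFile("testdata/cert.pem") // placeholder path
+	if err != nil {
+		return nil, fmt.Errorf("read cert: %w", err)
+	}
+	key, err := os.ReadFile("testdata/key.pem") // placeholder path
+	if err != nil {
+		return nil, fmt.Errorf("read key: %w", err)
+	}
+	return redpanda.Run(ctx,
+		"docker.redpanda.com/redpandadata/redpanda:v23.3.3",
+		redpanda.WithTLS(cert, key),
+	)
+}
+```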
+
+#### WithBootstrapConfig
+
+- Since testcontainers-go :material-tag: v0.33.0
+
+`WithBootstrapConfig` adds an arbitrary config key-value pair to the Redpanda container. As the name suggests, the pair is interpolated into the generated bootstrap
+config file, which is particularly useful for settings that would otherwise require a restart when applied to a running Redpanda instance.
+E.g. `WithBootstrapConfig("config_key", config_value)`, where `config_value` is of type `any`.
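+
+A hedged sketch of how this might be used (the config key below is purely illustrative; consult the Redpanda documentation for valid cluster property names):
+
+```go
+ctr, err := redpanda.Run(ctx,
+	"docker.redpanda.com/redpandadata/redpanda:v23.3.3",
+	// "example_cluster_property" is a placeholder key; the value can be of any type.
+	redpanda.WithBootstrapConfig("example_cluster_property", true),
+)
+```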
+
### Container Methods
The Redpanda container exposes the following methods:
#### KafkaSeedBroker
+- Since testcontainers-go :material-tag: v0.20.0
+
KafkaSeedBroker returns the seed broker that should be used for connecting
to the Kafka API with your Kafka client. It'll be returned in the format:
"host:port" - for example: "localhost:55687".
@@ -95,6 +162,8 @@ to the Kafka API with your Kafka client. It'll be returned in the format:
#### SchemaRegistryAddress
+- Since testcontainers-go :material-tag: v0.20.0
+
SchemaRegistryAddress returns the address to the schema registry API. This
is an HTTP-based API and thus the returned format will be: http://host:port.
@@ -105,6 +174,8 @@ is an HTTP-based API and thus the returned format will be: http://host:port.
#### AdminAPIAddress
+- Since testcontainers-go :material-tag: v0.20.0
+
AdminAPIAddress returns the address to the Redpanda Admin API. This
is an HTTP-based API and thus the returned format will be: http://host:port.
diff --git a/modules/registry/registry.go b/modules/registry/registry.go
index 6cfa3d537b..22aa86be54 100644
--- a/modules/registry/registry.go
+++ b/modules/registry/registry.go
@@ -220,10 +220,9 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom
// convenient for testing
"REGISTRY_STORAGE_DELETE_ENABLED": "true",
},
- WaitingFor: wait.ForAll(
- wait.ForExposedPort(),
- wait.ForLog("listening on [::]:5000").WithStartupTimeout(10*time.Second),
- ),
+ WaitingFor: wait.ForHTTP("/").
+ WithPort(registryPort).
+ WithStartupTimeout(10 * time.Second),
}
genericContainerReq := testcontainers.GenericContainerRequest{
diff --git a/parallel.go b/parallel.go
index 0027619b4c..0349023ba2 100644
--- a/parallel.go
+++ b/parallel.go
@@ -2,7 +2,6 @@ package testcontainers
import (
"context"
- "errors"
"fmt"
"sync"
)
@@ -32,24 +31,27 @@ func (gpe ParallelContainersError) Error() string {
return fmt.Sprintf("%v", gpe.Errors)
}
+// parallelContainersResult represents the result of a single parallel container request: the created container on success, or the request and its error on failure.
+type parallelContainersResult struct {
+ ParallelContainersRequestError
+ Container Container
+}
+
func parallelContainersRunner(
ctx context.Context,
requests <-chan GenericContainerRequest,
- errorsCh chan<- ParallelContainersRequestError,
- containers chan<- Container,
+ results chan<- parallelContainersResult,
wg *sync.WaitGroup,
) {
defer wg.Done()
for req := range requests {
c, err := GenericContainer(ctx, req)
+ res := parallelContainersResult{Container: c}
if err != nil {
- errorsCh <- ParallelContainersRequestError{
- Request: req,
- Error: errors.Join(err, TerminateContainer(c)),
- }
- continue
+ res.Request = req
+ res.Error = err
}
- containers <- c
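+ // always send a result so the collector accounts for every request, success or failure.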
+ results <- res
}
}
@@ -65,41 +67,26 @@ func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt
}
tasksChan := make(chan GenericContainerRequest, tasksChanSize)
- errsChan := make(chan ParallelContainersRequestError)
- resChan := make(chan Container)
- waitRes := make(chan struct{})
-
- containers := make([]Container, 0)
- errors := make([]ParallelContainersRequestError, 0)
+ resultsChan := make(chan parallelContainersResult, tasksChanSize)
+ done := make(chan struct{})
- wg := sync.WaitGroup{}
+ var wg sync.WaitGroup
wg.Add(tasksChanSize)
// run workers
for i := 0; i < tasksChanSize; i++ {
- go parallelContainersRunner(ctx, tasksChan, errsChan, resChan, &wg)
+ go parallelContainersRunner(ctx, tasksChan, resultsChan, &wg)
}
+ var errs []ParallelContainersRequestError
+ containers := make([]Container, 0, len(reqs))
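+ // collect the workers' results; the goroutine exits once resultsChan is closed and then signals completion via done.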
go func() {
- for {
- select {
- case c, ok := <-resChan:
- if !ok {
- resChan = nil
- } else {
- containers = append(containers, c)
- }
- case e, ok := <-errsChan:
- if !ok {
- errsChan = nil
- } else {
- errors = append(errors, e)
- }
- }
-
- if resChan == nil && errsChan == nil {
- waitRes <- struct{}{}
- break
+ defer close(done)
+ for res := range resultsChan {
+ if res.Error != nil {
+ errs = append(errs, res.ParallelContainersRequestError)
+ } else {
+ containers = append(containers, res.Container)
}
}
}()
@@ -108,14 +95,15 @@ func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt
tasksChan <- req
}
close(tasksChan)
+
wg.Wait()
- close(resChan)
- close(errsChan)
- <-waitRes
+ close(resultsChan)
+
+ <-done
- if len(errors) != 0 {
- return containers, ParallelContainersError{Errors: errors}
+ if len(errs) != 0 {
+ return containers, ParallelContainersError{Errors: errs}
}
return containers, nil
diff --git a/parallel_test.go b/parallel_test.go
index f937b1e56d..25f919e99d 100644
--- a/parallel_test.go
+++ b/parallel_test.go
@@ -2,7 +2,6 @@ package testcontainers
import (
"context"
- "errors"
"fmt"
"testing"
"time"
@@ -99,23 +98,18 @@ func TestParallelContainers(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
res, err := ParallelContainers(context.Background(), tc.reqs, ParallelContainersOptions{})
- if err != nil {
- require.NotZero(t, tc.expErrors)
- var e ParallelContainersError
- errors.As(err, &e)
- if len(e.Errors) != tc.expErrors {
- t.Fatalf("expected errors: %d, got: %d\n", tc.expErrors, len(e.Errors))
- }
- }
-
for _, c := range res {
- c := c
CleanupContainer(t, c)
}
- if len(res) != tc.resLen {
- t.Fatalf("expected containers: %d, got: %d\n", tc.resLen, len(res))
+ if tc.expErrors != 0 {
+ require.Error(t, err)
+ var errs ParallelContainersError
+ require.ErrorAs(t, err, &errs)
+ require.Len(t, errs.Errors, tc.expErrors)
}
+
+ require.Len(t, res, tc.resLen)
})
}
}
@@ -157,11 +151,8 @@ func TestParallelContainersWithReuse(t *testing.T) {
ctx := context.Background()
res, err := ParallelContainers(ctx, parallelRequest, ParallelContainersOptions{})
- if err != nil {
- var e ParallelContainersError
- errors.As(err, &e)
- t.Fatalf("expected errors: %d, got: %d\n", 0, len(e.Errors))
+ for _, c := range res {
+ CleanupContainer(t, c)
}
- // Container is reused, only terminate first container
- CleanupContainer(t, res[0])
+ require.NoError(t, err)
}
diff --git a/wait/exec_test.go b/wait/exec_test.go
index 224f7d99d9..8a82fb0211 100644
--- a/wait/exec_test.go
+++ b/wait/exec_test.go
@@ -22,16 +22,17 @@ import (
func ExampleExecStrategy() {
ctx := context.Background()
req := testcontainers.ContainerRequest{
- Image: "localstack/localstack:latest",
- WaitingFor: wait.ForExec([]string{"awslocal", "dynamodb", "list-tables"}),
+ Image: "alpine:latest",
+ Entrypoint: []string{"tail", "-f", "/dev/null"}, // needed for the container to stay alive
+ WaitingFor: wait.ForExec([]string{"ls", "/"}).WithStartupTimeout(1 * time.Second),
}
- localstack, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+ ctr, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
defer func() {
- if err := testcontainers.TerminateContainer(localstack); err != nil {
+ if err := testcontainers.TerminateContainer(ctr); err != nil {
log.Printf("failed to terminate container: %s", err)
}
}()
@@ -40,7 +41,7 @@ func ExampleExecStrategy() {
return
}
- state, err := localstack.State(ctx)
+ state, err := ctr.State(ctx)
if err != nil {
log.Printf("failed to get container state: %s", err)
return