Multiple changes:
* Add error messages when errors are returned
  --> There are still too many places where we just ignore errors
* Add a custom Prometheus metric to track when tests are in progress (see the sketch below)
* Add an annotation query to show when tests are in progress
* Extend the Gosbench example k8s test with a second test and make both tests run longer
mulbc committed Jun 6, 2020
1 parent 4511826 commit 6b8980f
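
The commit message mentions a custom Prometheus metric, and the worker/main.go diff below increments and decrements promTestGauge around each test run, but the declaration of that gauge is not part of the rendered hunks. A minimal sketch of how such a gauge could be declared with the Prometheus Go client library, assuming a metric name of gosbench_test_in_progress and a testName label (both names are illustrative assumptions, not taken from this commit):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// promTestGauge tracks how many test cases are currently in progress on this
// worker, labelled by test name. Metric and label names are assumptions.
var promTestGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
	Name: "gosbench_test_in_progress",
	Help: "Set to 1 (or higher) while the named test case is running",
}, []string{"testName"})

PerfTest then calls promTestGauge.WithLabelValues(testConfig.Name).Inc() before starting the parallel clients and .Dec() once they finish, so the annotation query (presumably the one added to k8s/monitoring.yaml, whose diff is not rendered here) can key on an expression along the lines of gosbench_test_in_progress > 0 to mark test windows on the dashboards.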
Showing 9 changed files with 160 additions and 62 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -2,4 +2,5 @@
testconf.yaml
modd.conf
main
dist/
dist/
k8s/gosbench.yaml
26 changes: 16 additions & 10 deletions Makefile
100644 → 100755
@@ -1,15 +1,21 @@
BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
VCS_REF = $(shell git rev-parse HEAD)
BUILD_DATE := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
UNIX_DATE := $(shell date -u +"%s")
VCS_REF := $(shell git rev-parse HEAD)

build:
docker build --tag quay.io/mulbc/goroom-server:dev --build-arg "TYPE=server" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
docker build --tag quay.io/mulbc/goroom-worker:dev --build-arg "TYPE=worker" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
docker build --tag quay.io/mulbc/gosbench-server:$(VCS_REF) --build-arg "TYPE=server" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
docker build --tag quay.io/mulbc/gosbench-worker:$(VCS_REF) --build-arg "TYPE=worker" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
debug-server:
docker run --rm --name=goroom-server -it quay.io/mulbc/goroom-server:dev sh
docker run --rm --name=gosbench-server -it quay.io/mulbc/gosbench-server:$(VCS_REF) sh
debug-worker:
docker run --rm --name=goroom-worker -it quay.io/mulbc/goroom-worker:dev sh
docker run --rm --name=gosbench-worker -it quay.io/mulbc/gosbench-worker:$(VCS_REF) sh
release:
docker tag quay.io/mulbc/goroom-server:dev quay.io/mulbc/goroom-server:latest
docker tag quay.io/mulbc/goroom-worker:dev quay.io/mulbc/goroom-worker:latest
docker push quay.io/mulbc/goroom-server:latest
docker push quay.io/mulbc/goroom-worker:latest
docker tag quay.io/mulbc/gosbench-server:$(VCS_REF) quay.io/mulbc/gosbench-server:latest
docker tag quay.io/mulbc/gosbench-worker:$(VCS_REF) quay.io/mulbc/gosbench-worker:latest
docker push quay.io/mulbc/gosbench-server:latest
docker push quay.io/mulbc/gosbench-worker:latest
push-dev:
docker build --tag quay.io/mulbc/gosbench-server:$(UNIX_DATE) --build-arg "TYPE=server" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
docker build --tag quay.io/mulbc/gosbench-worker:$(UNIX_DATE) --build-arg "TYPE=worker" --build-arg "BUILD_DATE=$(BUILD_DATE)" --build-arg "VCS_REF=$(VCS_REF)" .
docker push quay.io/mulbc/gosbench-server:$(UNIX_DATE)
docker push quay.io/mulbc/gosbench-worker:$(UNIX_DATE)
58 changes: 49 additions & 9 deletions k8s/gosbench.yaml
@@ -4,8 +4,8 @@ kind: ConfigMap
data:
config.yml: |-
s3_config:
- access_key: 8MKI...
secret_key: Y7quDBT...
- access_key: 8MKI7yLHF2L5Z07rabTZ
secret_key: Y7quDBT3az/emhiDOiQR18hZLKzFaKUjOU4YyPCP
region: eu-central-1
endpoint: https://172.30.196.58:443
skipSSLverify: true
@@ -18,7 +18,8 @@ data:
password: grafana
tests:
- read_weight: 20
- name: EverythingWorks
read_weight: 20
write_weight: 80
delete_weight: 0
list_weight: 0
@@ -29,24 +30,63 @@ data:
# distribution: constant, random, sequential
size_distribution: random
unit: KB
number_min: 10
number_min: 100
number_max: 100
# distribution: constant, random, sequential
number_distribution: constant
buckets:
number_min: 1
number_max: 10
# distribution: constant, random, sequential
number_distribution: constant
# Name prefix for buckets and objects
bucket_prefix: gosbench1-
object_prefix: obj
# End after a set amount of time
# Runtime in time.Duration - do not forget the unit please
# stop_with_runtime: 60s # Example with 60 seconds runtime
stop_with_runtime:
# End after a set amount of operations (per worker)
stop_with_ops: 3000
# Number of s3 performance test servers to run in parallel
workers: 2
# Set whether workers share the same buckets or not
# If set to True - bucket names will have the worker # appended
workers_share_buckets: True
# Number of requests processed in parallel by each worker
parallel_clients: 3
# Remove all generated buckets and their content after the run
clean_after: True
- name: EverythingWorksAgain
read_weight: 20
write_weight: 80
delete_weight: 0
list_weight: 0
objects:
size_min: 5
size_max: 100
part_size: 0
# distribution: constant, random, sequential
size_distribution: random
unit: KB
number_min: 100
number_max: 100
# distribution: constant, random, sequential
number_distribution: constant
buckets:
number_min: 1
number_max: 10
# distribution: constant, random, sequential
number_distribution: constant
# Name prefix for buckets and objects
bucket_prefix: 1255gosbench-
bucket_prefix: gosbench1-
object_prefix: obj
# End after a set amount of time
# Runtime in time.Duration - do not forget the unit please
# stop_with_runtime: 60s # Example with 60 seconds runtime
stop_with_runtime:
# End after a set amount of operations (per worker)
stop_with_ops: 10
stop_with_ops: 3000
# Number of s3 performance test servers to run in parallel
workers: 2
# Set whether workers share the same buckets or not
@@ -75,7 +115,7 @@
spec:
containers:
- name: gosbench-worker
image: quay.io/mulbc/goroom-worker:dev4
image: quay.io/mulbc/gosbench-worker
imagePullPolicy: Always
command: ['./main', '-d', '-s', 'gosbench-server:2000']
ports:
@@ -96,7 +136,7 @@
spec:
containers:
- name: gosbench-worker
image: quay.io/mulbc/goroom-worker:dev4
image: quay.io/mulbc/gosbench-worker
imagePullPolicy: Always
command: ['./main', '-d', '-s', 'gosbench-server:2000']
ports:
@@ -121,7 +161,7 @@
spec:
containers:
- name: server
image: quay.io/mulbc/goroom-server:dev4
image: quay.io/mulbc/gosbench-server
imagePullPolicy: Always
command: ['./main', '-c', '/app/config/config.yml']
ports:
2 changes: 1 addition & 1 deletion k8s/monitoring.yaml

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions server/main.go
@@ -151,7 +151,7 @@ func scheduleTests(config common.Testconf) {
func executeTestOnWorker(conn *net.Conn, config *common.WorkerConf, doneChannel chan bool, continueWorkers chan bool) {
encoder := json.NewEncoder(*conn)
decoder := json.NewDecoder(*conn)
encoder.Encode(common.WorkerMessage{Message: "init", Config: config})
_ = encoder.Encode(common.WorkerMessage{Message: "init", Config: config})

var response common.WorkerMessage
for {
@@ -166,7 +166,7 @@ func executeTestOnWorker(conn *net.Conn, config *common.WorkerConf, doneChannel
case "preparations done":
doneChannel <- true
<-continueWorkers
encoder.Encode(common.WorkerMessage{Message: "start work"})
_ = encoder.Encode(common.WorkerMessage{Message: "start work"})
case "work done":
doneChannel <- true
(*conn).Close()
@@ -178,5 +178,5 @@ func executeTestOnWorker(conn *net.Conn, config *common.WorkerConf, doneChannel
func shutdownWorker(conn *net.Conn) {
encoder := json.NewEncoder(*conn)
log.WithField("Worker", (*conn).RemoteAddr()).Info("Shutting down worker")
encoder.Encode(common.WorkerMessage{Message: "shutdown"})
_ = encoder.Encode(common.WorkerMessage{Message: "shutdown"})
}
2 changes: 1 addition & 1 deletion server/main_test.go
@@ -24,7 +24,7 @@ func Test_loadConfigFromFile(t *testing.T) {
- access_key: secretKey
secret_key: secretSecret
endpoint: test`)}, common.Testconf{
S3Config: []*common.S3Configuration{&common.S3Configuration{
S3Config: []*common.S3Configuration{{
Endpoint: "test",
AccessKey: "secretKey",
SecretKey: "secretSecret",
60 changes: 47 additions & 13 deletions worker/main.go
@@ -63,7 +63,7 @@ func connectToServer(serverAddress string) error {
encoder := json.NewEncoder(conn)
decoder := json.NewDecoder(conn)

encoder.Encode("ready for work")
_ = encoder.Encode("ready for work")

var response common.WorkerMessage
Workqueue := &Workqueue{
@@ -86,18 +86,21 @@ func connectToServer(serverAddress string) error {
fillWorkqueue(config.Test, Workqueue, config.WorkerID, config.Test.WorkerShareBuckets)

for _, work := range *Workqueue.Queue {
work.Prepare()
err = work.Prepare()
if err != nil {
log.WithError(err).Error("Error during work preparation - ignoring")
}
}
log.Info("Preparations finished - waiting on server to start work")
encoder.Encode(common.WorkerMessage{Message: "preparations done"})
_ = encoder.Encode(common.WorkerMessage{Message: "preparations done"})
case "start work":
if config == (common.WorkerConf{}) || len(*Workqueue.Queue) == 0 {
log.Fatal("Was instructed to start work - but the preparation step is incomplete - reconnecting")
return nil
}
log.Info("Starting to work")
PerfTest(config.Test, Workqueue, config.WorkerID)
encoder.Encode(common.WorkerMessage{Message: "work done"})
_ = encoder.Encode(common.WorkerMessage{Message: "work done"})
// Work is done - return to being a ready worker by reconnecting
return nil
case "shutdown":
@@ -111,6 +114,8 @@ func connectToServer(serverAddress string) error {
func PerfTest(testConfig *common.TestCaseConfiguration, Workqueue *Workqueue, workerID string) {
workChannel := make(chan WorkItem, len(*Workqueue.Queue))
doneChannel := make(chan bool)

promTestGauge.WithLabelValues(testConfig.Name).Inc()
for worker := 0; worker < testConfig.ParallelClients; worker++ {
go DoWork(workChannel, doneChannel)
}
@@ -125,13 +130,21 @@ func PerfTest(testConfig *common.TestCaseConfiguration, Workqueue *Workqueue, wo
<-doneChannel
}
log.Info("All clients finished")
promTestGauge.WithLabelValues(testConfig.Name).Dec()

if testConfig.CleanAfter {
log.Info("Housekeeping started")
for _, work := range *Workqueue.Queue {
work.Clean()
err := work.Clean()
if err != nil {
log.WithError(err).Error("Error during cleanup - ignoring")
}
}
for bucket := uint64(0); bucket < testConfig.Buckets.NumberMax; bucket++ {
deleteBucket(housekeepingSvc, fmt.Sprintf("%s%s%d", workerID, testConfig.BucketPrefix, bucket))
err := deleteBucket(housekeepingSvc, fmt.Sprintf("%s%s%d", workerID, testConfig.BucketPrefix, bucket))
if err != nil {
log.WithError(err).Error("Error during bucket deleting - ignoring")
}
}
log.Info("Housekeeping finished")
}
@@ -154,7 +167,10 @@ func workUntilTimeout(Workqueue *Workqueue, workChannel chan WorkItem, runtime t
switch work.(type) {
case DeleteOperation:
log.Debug("Re-Running Work preparation for delete job started")
work.Prepare()
err := work.Prepare()
if err != nil {
log.WithError(err).Error("Error during work preparation - ignoring")
}
log.Debug("Delete preparation re-run finished")
}
}
@@ -179,7 +195,10 @@ func workUntilOps(Workqueue *Workqueue, workChannel chan WorkItem, maxOps uint64
switch work.(type) {
case DeleteOperation:
log.Debug("Re-Running Work preparation for delete job started")
work.Prepare()
err := work.Prepare()
if err != nil {
log.WithError(err).Error("Error during work preparation - ignoring")
}
log.Debug("Delete preparation re-run finished")
}
}
@@ -231,7 +250,10 @@ func fillWorkqueue(testConfig *common.TestCaseConfiguration, Workqueue *Workqueu
nextOp := GetNextOperation(Workqueue)
switch nextOp {
case "read":
IncreaseOperationValue(nextOp, 1/float64(testConfig.ReadWeight), Workqueue)
err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ReadWeight), Workqueue)
if err != nil {
log.WithError(err).Error("Could not increase operational Value - ignoring")
}
new := ReadOperation{
Bucket: bucketName,
ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object),
@@ -240,7 +262,10 @@ func fillWorkqueue(testConfig *common.TestCaseConfiguration, Workqueue *Workqueu
}
*Workqueue.Queue = append(*Workqueue.Queue, new)
case "existing_read":
IncreaseOperationValue(nextOp, 1/float64(testConfig.ExistingReadWeight), Workqueue)
err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ExistingReadWeight), Workqueue)
if err != nil {
log.WithError(err).Error("Could not increase operational Value - ignoring")
}
new := ReadOperation{
// TODO: Get bucket and object that already exist
Bucket: bucketName,
@@ -250,23 +275,32 @@ func fillWorkqueue(testConfig *common.TestCaseConfiguration, Workqueue *Workqueu
}
*Workqueue.Queue = append(*Workqueue.Queue, new)
case "write":
IncreaseOperationValue(nextOp, 1/float64(testConfig.WriteWeight), Workqueue)
err := IncreaseOperationValue(nextOp, 1/float64(testConfig.WriteWeight), Workqueue)
if err != nil {
log.WithError(err).Error("Could not increase operational Value - ignoring")
}
new := WriteOperation{
Bucket: bucketName,
ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object),
ObjectSize: objectSize,
}
*Workqueue.Queue = append(*Workqueue.Queue, new)
case "list":
IncreaseOperationValue(nextOp, 1/float64(testConfig.ListWeight), Workqueue)
err := IncreaseOperationValue(nextOp, 1/float64(testConfig.ListWeight), Workqueue)
if err != nil {
log.WithError(err).Error("Could not increase operational Value - ignoring")
}
new := ListOperation{
Bucket: bucketName,
ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object),
ObjectSize: objectSize,
}
*Workqueue.Queue = append(*Workqueue.Queue, new)
case "delete":
IncreaseOperationValue(nextOp, 1/float64(testConfig.DeleteWeight), Workqueue)
err := IncreaseOperationValue(nextOp, 1/float64(testConfig.DeleteWeight), Workqueue)
if err != nil {
log.WithError(err).Error("Could not increase operational Value - ignoring")
}
new := DeleteOperation{
Bucket: bucketName,
ObjectName: fmt.Sprintf("%s%s%d", workerID, testConfig.ObjectPrefix, object),