Commit 0502297

Merge pull request #32 from enriquebris/development
v0.9.0
enriquebris authored Jan 7, 2019
2 parents 8d0aadd + e17f995 commit 0502297
Showing 9 changed files with 903 additions and 11 deletions.
76 changes: 76 additions & 0 deletions .gitignore
@@ -0,0 +1,76 @@
# Created by .ignore support plugin (hsz.mobi)
### Go template
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

12 changes: 11 additions & 1 deletion .travis.yml
@@ -2,4 +2,14 @@ language: go

go:
- 1.9.x
- 1.10.x
- 1.10.x
- 1.11.x

before_install:
- go get -t -v ./...

script:
- go test -coverprofile=coverage.txt -covermode=atomic

after_success:
- bash <(curl -s https://codecov.io/bash)
3 changes: 2 additions & 1 deletion examples/basic/main.go
@@ -1,6 +1,7 @@
// In this example:
// - 10 workers will be running forever
// - 10 workers will be started up
// - 30 jobs will be enqueued to be processed by the workers
// - all workers will be killed after the 30 enqueued jobs get processed

package main

…
71 changes: 71 additions & 0 deletions examples/killed_workers_notifications/main.go
@@ -0,0 +1,71 @@
// In this example:
// - 10 workers will be started up
// - the execution will wait until all 10 workers are alive
// - 30 jobs will be enqueued to be processed by the workers
// - all workers will be killed after the 30 enqueued jobs get processed
// - a notification will be sent each time a worker is killed (10 notifications will be received)

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/enriquebris/goworkerpool"
)

func main() {
	// total workers
	totalInitialWorkers := 10
	// max number of pending jobs
	maxNumberPendingJobs := 15
	// do not log messages about the pool processing
	verbose := false

	pool := goworkerpool.NewPool(totalInitialWorkers, maxNumberPendingJobs, verbose)

	// add the worker handler function
	pool.SetWorkerFunc(func(data interface{}) bool {
		log.Printf("processing %v\n", data)
		// add a 1 second delay (to make it look as if it were processing the job)
		time.Sleep(time.Second)
		log.Printf("processing finished for: %v\n", data)

		// let the pool know that the worker was able to complete the task
		return true
	})

	// set the channel to receive notifications every time a worker is killed
	killedWorkerNotificationChannel := make(chan int)
	pool.SetKilledWorkerChan(killedWorkerNotificationChannel)

	// start up the workers
	pool.StartWorkers()

	// enqueue jobs in a separate goroutine
	go func() {
		for i := 0; i < 30; i++ {
			pool.AddTask(i)
		}

		// kill all workers after the currently enqueued jobs get processed
		pool.LateKillAllWorkers()
	}()

	// Instead of using pool.Wait() to wait until all workers are down, the following loop listens for the signals
	// sent each time a worker is killed. The loop will exit once all initial workers have been killed.
	totalKilledWorkers := 0
	// wait until all initial workers are killed
	for notification := range killedWorkerNotificationChannel {
		totalKilledWorkers = totalKilledWorkers + notification
		fmt.Printf("total killed workers: %v\n", totalKilledWorkers)

		if totalKilledWorkers == totalInitialWorkers {
			// break the loop once all initial workers are down
			break
		}
	}

	fmt.Printf("All %v workers are down\n", totalInitialWorkers)
}
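
As the comment in the loop above notes, pool.Wait() is the simpler route when no per-worker notification is needed. A minimal sketch of that variant, using only the calls shown in this commit, with the same pool parameters as the example:

package main

import (
	"log"
	"time"

	"github.com/enriquebris/goworkerpool"
)

func main() {
	pool := goworkerpool.NewPool(10, 15, false)
	pool.SetWorkerFunc(func(data interface{}) bool {
		log.Printf("processing %v\n", data)
		time.Sleep(time.Second) // simulate work
		return true
	})
	pool.StartWorkers()

	go func() {
		for i := 0; i < 30; i++ {
			pool.AddTask(i)
		}
		// kill all workers once the enqueued jobs are processed
		pool.LateKillAllWorkers()
	}()

	// block until all workers are down
	pool.Wait()
}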
72 changes: 72 additions & 0 deletions examples/new_workers_notifications/main.go
@@ -0,0 +1,72 @@
// In this example:
// - 10 workers will be started up
// - the execution will wait until all 10 workers are alive
// - 30 jobs will be enqueued to be processed by the workers
// - all workers will be killed after the 30 enqueued jobs get processed

package main

import (
	"log"
	"time"

	"github.com/enriquebris/goworkerpool"
)

func main() {
	// total workers
	totalInitialWorkers := 10
	// max number of pending jobs
	maxNumberPendingJobs := 15
	// do not log messages about the pool processing
	verbose := false

	pool := goworkerpool.NewPool(totalInitialWorkers, maxNumberPendingJobs, verbose)

	// add the worker handler function
	pool.SetWorkerFunc(func(data interface{}) bool {
		log.Printf("processing %v\n", data)
		// add a 1 second delay (to make it look as if it were processing the job)
		time.Sleep(time.Second)
		log.Printf("processing finished for: %v\n", data)

		// let the pool know that the worker was able to complete the task
		return true
	})

	// set the channel to receive notifications every time a new worker is started up
	newWorkerNotificationChannel := make(chan int)
	pool.SetNewWorkerChan(newWorkerNotificationChannel)

	// Note that the following lines (45 to 56) could be replaced by pool.StartWorkersAndWait() to achieve the
	// same goal: wait until all workers are up. This code is intended as an example.

	// start up the workers
	pool.StartWorkers()

	totalWorkersUp := 0
	// wait until all initial workers are alive
	for notification := range newWorkerNotificationChannel {
		totalWorkersUp = totalWorkersUp + notification
		if totalWorkersUp == totalInitialWorkers {
			// break the loop once all initial workers are up
			break
		}
	}

	log.Printf("%v workers are up\n", totalWorkersUp)

	// enqueue jobs in a separate goroutine
	go func() {
		for i := 0; i < 30; i++ {
			pool.AddTask(i)
		}

		// kill all workers after the currently enqueued jobs get processed
		pool.LateKillAllWorkers()
	}()

	// wait while at least one worker is alive
	pool.Wait()
}
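
The in-code note above mentions pool.StartWorkersAndWait() as a drop-in replacement for the manual startup loop. A minimal sketch under that assumption (the call is taken from the comment; its exact signature is not shown in this commit):

package main

import (
	"log"
	"time"

	"github.com/enriquebris/goworkerpool"
)

func main() {
	pool := goworkerpool.NewPool(10, 15, false)
	pool.SetWorkerFunc(func(data interface{}) bool {
		log.Printf("processing %v\n", data)
		time.Sleep(time.Second) // simulate work
		return true
	})

	// start the workers and block until all of them are up
	// (assumed behavior, per the comment in the example above)
	pool.StartWorkersAndWait()

	go func() {
		for i := 0; i < 30; i++ {
			pool.AddTask(i)
		}
		pool.LateKillAllWorkers()
	}()

	// wait while at least one worker is alive
	pool.Wait()
}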
4 changes: 4 additions & 0 deletions history.md
@@ -1,5 +1,9 @@
## History

### v0.9.0

- Added optional channel to be notified when new workers are started

### v0.8.0

- Enqueue jobs plus callback functions
…
(3 more changed files not shown)
