From 29a0b1504f773ca8e59dbac73ed0f63718f224cd Mon Sep 17 00:00:00 2001 From: katarzyna-z Date: Mon, 10 Oct 2016 14:43:15 +0200 Subject: [PATCH] Cleaning of examples/ folder Removed the reference to examples/tasks/ in docs/TASKS.md Removed the reference to examples/tasks/ in core/task_medium_test.go Removed the reference to examples/tasks/ in readme.md Removed the reference to examples/tasks/ in docs/SNAPCTL.md Removed the reference to examples/tasks/ in docs/REST_API.md --- README.md | 37 +- core/task_medium_test.go | 140 ++++- docs/REST_API.md | 49 +- docs/SNAPCTL.md | 79 ++- docs/TASKS.md | 58 +- examples/README.md | 19 +- examples/influxdb-grafana/README.md | 49 -- examples/influxdb-grafana/docker-compose.yml | 16 - examples/influxdb-grafana/grafana/Dockerfile | 20 - examples/influxdb-grafana/grafana/psutil.json | 543 ------------------ .../influxdb-grafana/influxdb/0.9/Dockerfile | 34 -- .../influxdb-grafana/influxdb/0.9/config.toml | 235 -------- examples/influxdb-grafana/influxdb/0.9/run.sh | 156 ----- .../influxdb-grafana/influxdb/0.9/types.db | 241 -------- examples/influxdb-grafana/run-psutil.sh | 131 ----- examples/riemann/Vagrantfile | 41 -- examples/riemann/script/init.sh | 57 -- examples/riemann/service/config.rb | 2 - examples/riemann/service/riemann-dash.service | 10 - examples/riemann/service/riemann.service | 10 - examples/tasks/README.md | 56 -- examples/tasks/ceph-file.json | 66 --- examples/tasks/distributed-mock-file.json | 39 -- examples/tasks/mock-file.json | 37 -- examples/tasks/mock-file.yaml | 28 - .../tasks/mock-file_specific_instance.json | 30 - examples/tasks/mock-file_tuple.json | 29 - examples/tasks/mock_tagged-file.json | 43 -- examples/tasks/psutil-file.yaml | 25 - examples/tasks/psutil-file_no-processor.yaml | 17 - examples/tasks/psutil-influx.json | 33 -- 31 files changed, 352 insertions(+), 1978 deletions(-) delete mode 100644 examples/influxdb-grafana/README.md delete mode 100644 examples/influxdb-grafana/docker-compose.yml delete mode 100644 examples/influxdb-grafana/grafana/Dockerfile delete mode 100644 examples/influxdb-grafana/grafana/psutil.json delete mode 100644 examples/influxdb-grafana/influxdb/0.9/Dockerfile delete mode 100644 examples/influxdb-grafana/influxdb/0.9/config.toml delete mode 100755 examples/influxdb-grafana/influxdb/0.9/run.sh delete mode 100644 examples/influxdb-grafana/influxdb/0.9/types.db delete mode 100755 examples/influxdb-grafana/run-psutil.sh delete mode 100644 examples/riemann/Vagrantfile delete mode 100755 examples/riemann/script/init.sh delete mode 100644 examples/riemann/service/config.rb delete mode 100644 examples/riemann/service/riemann-dash.service delete mode 100644 examples/riemann/service/riemann.service delete mode 100644 examples/tasks/README.md delete mode 100644 examples/tasks/ceph-file.json delete mode 100644 examples/tasks/distributed-mock-file.json delete mode 100644 examples/tasks/mock-file.json delete mode 100644 examples/tasks/mock-file.yaml delete mode 100644 examples/tasks/mock-file_specific_instance.json delete mode 100644 examples/tasks/mock-file_tuple.json delete mode 100644 examples/tasks/mock_tagged-file.json delete mode 100644 examples/tasks/psutil-file.yaml delete mode 100644 examples/tasks/psutil-file_no-processor.yaml delete mode 100644 examples/tasks/psutil-influx.json diff --git a/README.md b/README.md index 45dad02b5..848699dc0 100644 --- a/README.md +++ b/README.md @@ -155,11 +155,44 @@ file 3 publisher false loaded You now have one of each plugin type loaded into the framework. 
To begin collecting data, you need to create a task.
 
 ### Running Tasks
-Tasks are most often shared as a Task Manifest and is written in JSON or YAML format. Make a copy of [this example task](./examples/tasks/mock-file.yaml) from the `examples/tasks/` directory on your local system and then start the task:
+[Tasks](docs/TASKS.md) are most often shared as a Task Manifest and are written in JSON or YAML format.
+
+Create a task manifest file, for example `mock-file.yaml`, with the following content:
+```yaml
+---
+  version: 1
+  schedule:
+    type: "simple"
+    interval: "1s"
+  max-failures: 10
+  workflow:
+    collect:
+      metrics:
+        /intel/mock/foo: {}
+        /intel/mock/bar: {}
+        /intel/mock/*/baz: {}
+      config:
+        /intel/mock:
+          name: "root"
+          password: "secret"
+      process:
+        -
+          plugin_name: "passthru"
+          config:
+            debug: true
+          process: null
+          publish:
+            -
+              plugin_name: "mock-file"
+              config:
+                file: "/tmp/snap_published_mock_file.log"
+                debug: true
+```
+
+and then start the task:
 ```
 $ cd ~/snap
-$ curl https://raw.githubusercontent.com/intelsdi-x/snap/master/examples/tasks/mock-file.yaml > mock-file.yaml
 $ snapctl task create -t mock-file.yaml
 Using task manifest to create task
 Task created
diff --git a/core/task_medium_test.go b/core/task_medium_test.go
index 3df3848af..dd577dba7 100644
--- a/core/task_medium_test.go
+++ b/core/task_medium_test.go
@@ -48,11 +48,84 @@ func (t *taskErrors) Errors() []serror.SnapError {
 const (
 	DUMMY_FILE = "dummy.txt"
-	YAML_FILE  = "../examples/tasks/mock-file.yaml"
-	JSON_FILE  = "../examples/tasks/mock-file.json"
+	YAML_FILE  = "./mock-file.yaml"
+	JSON_FILE  = "./mock-file.json"
 	DUMMY_TYPE = "dummy"
 )
 
+var (
+	YAML_FILE_CONTENT = []byte(`
+	---
+	  version: 1
+	  schedule:
+	    type: "simple"
+	    interval: "1s"
+	  max-failures: 10
+	  workflow:
+	    collect:
+	      metrics:
+	        /intel/mock/foo: {}
+	        /intel/mock/bar: {}
+	        /intel/mock/*/baz: {}
+	      config:
+	        /intel/mock:
+	          name: "root"
+	          password: "secret"
+	      process:
+	        -
+	          plugin_name: "passthru"
+	          config:
+	            debug: true
+	          process: null
+	          publish:
+	            -
+	              plugin_name: "file"
+	              config:
+	                file: "/tmp/snap_published_mock_file.log"
+	                debug: true
+	`)
+
+	JSON_FILE_CONTENT = []byte(`
+	{
+	  "version": 1,
+	  "schedule": {
+	    "type": "simple",
+	    "interval": "1s"
+	  },
+	  "max-failures": 10,
+	  "workflow": {
+	    "collect": {
+	      "metrics": {
+	        "/intel/mock/foo": {},
+	        "/intel/mock/bar": {},
+	        "/intel/mock/*/baz": {}
+	      },
+	      "config": {
+	        "/intel/mock": {
+	          "name": "root",
+	          "password": "secret"
+	        }
+	      },
+	      "process": [
+	        {
+	          "plugin_name": "passthru",
+	          "process": null,
+	          "publish": [
+	            {
+	              "plugin_name": "file",
+	              "config": {
+	                "file": "/tmp/snap_published_mock_file.log"
+	              }
+	            }
+	          ]
+	        }
+	      ]
+	    }
+	  }
+	}
+	`)
+)
+
 func koRoutine(sch schedule.Schedule,
 	wfMap *wmap.WorkflowMap,
 	startOnCreate bool,
@@ -72,8 +145,21 @@ func okRoutine(sch schedule.Schedule,
 	return nil, nil
 }
 
-func TestUnmarshalBodyTask(t *testing.T) {
+// createTaskFile writes a task manifest to disk for the tests below.
+func createTaskFile(name string, content []byte) error {
+	f, err := os.Create(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = f.Write(content)
+	return err
+}
+
+// deleteTaskFile removes a task manifest created by createTaskFile.
+func deleteTaskFile(name string) error {
+	return os.Remove(name)
+}
 
+func TestUnmarshalBodyTask(t *testing.T) {
 	Convey("Non existing file", t, func() {
 		file, err := os.Open(DUMMY_FILE)
 		So(file, ShouldBeNil)
@@ -85,6 +171,9 @@ func TestUnmarshalBodyTask(t *testing.T) {
 	})
 
 	Convey("Bad JSON file", t, func() {
+		err := createTaskFile(YAML_FILE, YAML_FILE_CONTENT)
+		So(err, ShouldBeNil)
+
 		var tr TaskReq1
 		file, err := os.Open(YAML_FILE)
 		So(file, ShouldNotBeNil)
@@ -93,9 +182,15 @@ func
TestUnmarshalBodyTask(t *testing.T) { So(code, ShouldEqual, 400) So(err, ShouldNotBeNil) So(err.Error(), ShouldEqual, "invalid character '-' in numeric literal") + + err = deleteTaskFile(YAML_FILE) + So(err, ShouldBeNil) }) Convey("Proper JSON file", t, func() { + err := createTaskFile(JSON_FILE, JSON_FILE_CONTENT) + So(err, ShouldBeNil) + var tr TaskReq1 file, err := os.Open(JSON_FILE) So(file, ShouldNotBeNil) @@ -103,6 +198,9 @@ func TestUnmarshalBodyTask(t *testing.T) { code, err := UnmarshalBody(&tr, file) So(code, ShouldEqual, 0) So(err, ShouldBeNil) + + err = deleteTaskFile(JSON_FILE) + So(err, ShouldBeNil) }) } @@ -119,6 +217,9 @@ func TestCreateTaskRequest(t *testing.T) { }) Convey("Bad JSON file", t, func() { + err := createTaskFile(YAML_FILE, YAML_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(YAML_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -126,9 +227,15 @@ func TestCreateTaskRequest(t *testing.T) { So(task, ShouldBeNil) So(err, ShouldNotBeNil) So(err.Error(), ShouldEqual, "invalid character '-' in numeric literal") + + err = deleteTaskFile(YAML_FILE) + So(err, ShouldBeNil) }) Convey("Proper JSON file", t, func() { + err := createTaskFile(JSON_FILE, JSON_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(JSON_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -142,6 +249,9 @@ func TestCreateTaskRequest(t *testing.T) { So(task.Schedule.StartTimestamp, ShouldBeNil) So(task.Schedule.StopTimestamp, ShouldBeNil) So(task.Start, ShouldEqual, false) + + err = deleteTaskFile(JSON_FILE) + So(err, ShouldBeNil) }) } @@ -159,6 +269,9 @@ func TestCreateTaskFromContent(t *testing.T) { }) Convey("Bad JSON file", t, func() { + err := createTaskFile(YAML_FILE, YAML_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(YAML_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -167,9 +280,15 @@ func TestCreateTaskFromContent(t *testing.T) { So(task, ShouldBeNil) So(err, ShouldNotBeNil) So(err.Error(), ShouldEqual, "invalid character '-' in numeric literal") + + err = deleteTaskFile(YAML_FILE) + So(err, ShouldBeNil) }) Convey("Proper JSON file no workflow routine", t, func() { + err := createTaskFile(JSON_FILE, JSON_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(JSON_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -178,9 +297,15 @@ func TestCreateTaskFromContent(t *testing.T) { So(task, ShouldBeNil) So(err, ShouldNotBeNil) So(err.Error(), ShouldEqual, "Missing workflow creation routine") + + err = deleteTaskFile(JSON_FILE) + So(err, ShouldBeNil) }) Convey("Proper JSON file erroring routine", t, func() { + err := createTaskFile(JSON_FILE, JSON_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(JSON_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -189,9 +314,15 @@ func TestCreateTaskFromContent(t *testing.T) { So(task, ShouldBeNil) So(err, ShouldNotBeNil) So(err.Error(), ShouldEqual, "Dummy error") + + err = deleteTaskFile(JSON_FILE) + So(err, ShouldBeNil) }) Convey("Proper JSON file proper routine", t, func() { + err := createTaskFile(JSON_FILE, JSON_FILE_CONTENT) + So(err, ShouldBeNil) + file, err := os.Open(JSON_FILE) So(file, ShouldNotBeNil) So(err, ShouldBeNil) @@ -199,5 +330,8 @@ func TestCreateTaskFromContent(t *testing.T) { task, err := CreateTaskFromContent(file, &autoStart, okRoutine) So(task, ShouldBeNil) So(err, ShouldBeNil) + + err = deleteTaskFile(JSON_FILE) + So(err, ShouldBeNil) }) } diff --git a/docs/REST_API.md b/docs/REST_API.md index 161909495..280130285 100644 --- a/docs/REST_API.md +++ 
b/docs/REST_API.md
@@ -462,11 +462,50 @@ _**Example Response**_
 {"type":"metric-event","message":"","event":[{"namespace":"/intel/mock/host0/baz","data":87,"source":"egu-mac01.lan","timestamp":"2015-11-19T23:45:42.075605924-08:00"},{"namespace":"/intel/mock/host1/baz","data":89,"source":"egu-mac01.lan","timestamp":"2015-11-19T23:45:42.075609242-08:00"},{"namespace":"/intel/mock/host2/baz","data":84,"source":"egu-mac01.lan","timestamp":"2015-11-19T23:45:42.075611747-08:00"},{"namespace":"/intel/mock/host3/baz","data":82,"source":"egu-mac01.lan","timestamp":"2015-11-19T23:45:42.075613786-08:00"}...
 ```
 **POST /v1/tasks**:
-Create a task with the JSON input
+Create a task with the JSON input, using, for example, a `mock-file.json` file with the following content:
+```json
+{
+  "version": 1,
+  "schedule": {
+    "type": "simple",
+    "interval": "1s"
+  },
+  "max-failures": 10,
+  "workflow": {
+    "collect": {
+      "metrics": {
+        "/intel/mock/foo": {},
+        "/intel/mock/bar": {},
+        "/intel/mock/*/baz": {}
+      },
+      "config": {
+        "/intel/mock": {
+          "name": "root",
+          "password": "secret"
+        }
+      },
+      "process": [
+        {
+          "plugin_name": "passthru",
+          "process": null,
+          "publish": [
+            {
+              "plugin_name": "mock-file",
+              "config": {
+                "file": "/tmp/published"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  }
+}
+```
 _**Example Request**_
 ```
-curl -vXPOST http://localhost:8181/v1/tasks -d @../examples/tasks/mock-file.json --header "Content-Type: application/json"
+curl -vXPOST http://localhost:8181/v1/tasks -d @mock-file.json --header "Content-Type: application/json"
 ```
 _**Example Response**_
 ```json
@@ -496,8 +535,8 @@ _**Example Response**_
       },
       "config": {
         "/intel/mock": {
-          "password": "secret",
-          "user": "root"
+          "user": "root",
+          "password": "secret"
         }
       },
       "process": [
@@ -506,7 +545,7 @@ _**Example Response**_
           "plugin_version": 0,
           "publish": [
             {
-              "plugin_name": "file",
+              "plugin_name": "mock-file",
               "plugin_version": 0,
               "config": {
                 "file": "/tmp/published"
diff --git a/docs/SNAPCTL.md b/docs/SNAPCTL.md
index 77297976d..115708f05 100644
--- a/docs/SNAPCTL.md
+++ b/docs/SNAPCTL.md
@@ -100,29 +100,96 @@ Example Usage
 -------------
 
 ### Load and unload plugins, create and start a task
-(Log level is set to 1 and signing is turned off for this example.)
+In one terminal window, run snapd (log level is set to 1 and signing is turned off for this example):
 ```
 $ $SNAP_PATH/bin/snapd -l 1 -t 0
 ```
 
+Prepare a task manifest file, for example `mock-file.json`, with the following content:
+```json
+{
+  "version": 1,
+  "schedule": {
+    "type": "simple",
+    "interval": "1s"
+  },
+  "max-failures": 10,
+  "workflow": {
+    "collect": {
+      "metrics": {
+        "/intel/mock/foo": {},
+        "/intel/mock/bar": {},
+        "/intel/mock/*/baz": {}
+      },
+      "config": {
+        "/intel/mock": {
+          "name": "root",
+          "password": "secret"
+        }
+      },
+      "process": [
+        {
+          "plugin_name": "passthru",
+          "process": null,
+          "publish": [
+            {
+              "plugin_name": "mock-file",
+              "config": {
+                "file": "/tmp/snap_published_mock_file.log"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  }
+}
+```
+
+Prepare a workflow manifest file, for example `workflow.json`, with the following content:
+```json
+{
+  "collect": {
+    "metrics": {
+      "/intel/mock/foo": {}
+    },
+    "config": {
+      "/intel/mock/foo": {
+        "password": "testval"
+      }
+    },
+    "process": [],
+    "publish": [
+      {
+        "plugin_name": "mock-file",
+        "config": {
+          "file": "/tmp/rest.test"
+        }
+      }
+    ]
+  }
+}
+```
+
+and then:
+
 1. load a collector plugin
 2. load a processing plugin
 3. load a publishing plugin
 4. list the plugins
 5. start a task with a task manifest
 6. start a task with a workflow manifest
-8. list the tasks
-9. unload a plugins
+7. list the tasks
+8. unload the plugins
 
 ```
-
 $ $SNAP_PATH/bin/snapctl plugin load $SNAP_PATH/plugin/snap-plugin-collector-mock1
 $ $SNAP_PATH/bin/snapctl plugin load $SNAP_PATH/plugin/snap-plugin-processor-passthru
 $ $SNAP_PATH/bin/snapctl plugin load $SNAP_PATH/plugin/snap-plugin-publisher-mock-file
 $ $SNAP_PATH/bin/snapctl plugin list
-$ $SNAP_PATH/bin/snapctl task create -t $SNAP_PATH/../examples/tasks/mock-file.json
-$ $SNAP_PATH/bin/snapctl task create -w $SNAP_PATH/../mgmt/rest/wmap_sample/1.json -i 1s -d 10s
+$ $SNAP_PATH/bin/snapctl task create -t mock-file.json
+$ $SNAP_PATH/bin/snapctl task create -w workflow.json -i 1s -d 10s
 $ $SNAP_PATH/bin/snapctl task list
 $ $SNAP_PATH/bin/snapctl plugin unload -t collector -n mock -v
 $ $SNAP_PATH/bin/snapctl plugin unload -t processor -n passthru -v
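A quick way to verify the flow above: watch the running task and tail the file that the mock-file publisher writes. This is a minimal sketch under the assumptions of the manifests shown earlier: `<task_id>` is a placeholder for an id printed by `task list`, and the log path comes from the example `mock-file.json`.

```
$ $SNAP_PATH/bin/snapctl task list
$ $SNAP_PATH/bin/snapctl task watch <task_id>
$ tail -f /tmp/snap_published_mock_file.log
```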
diff --git a/docs/TASKS.md b/docs/TASKS.md
index 524b7b817..85d21f84c 100644
--- a/docs/TASKS.md
+++ b/docs/TASKS.md
@@ -27,7 +27,7 @@ The schedule describes the schedule type and interval for running the task. The
 - **simple schedule** which is described above,
 - **window schedule** which adds a start and stop time,
 - **cron schedule** which supports cron-like entries in ```interval``` field, like in this example (workflow will fire every hour on the half hour):
-```
+```json
 "version": 1,
 "schedule": {
 "type": "cron",
@@ -38,9 +38,9 @@ The schedule describes the schedule type and interval for running the task. The
 More on cron expressions can be found here: https://godoc.org/github.com/robfig/cron
 
 #### Max-Failures
-By default, snap will disable a task if there is 10 consecutive errors from any plugins within the workflow. The configuration
-can be changed by specifying the number of failures value in the task header. If the max-failures value is -1, snap will
-not disable a task with consecutive failure. Instead, snap will sleep for 1 second for every 10 consective failures
+By default, Snap will disable a task if there are 10 consecutive errors from any plugins within the workflow. The configuration
+can be changed by specifying the max-failures value in the task header. If the max-failures value is -1, Snap will
+not disable a task on consecutive failures. Instead, Snap will sleep for 1 second for every 10 consecutive failures
 and retry again. For more on tasks, visit [`SNAPCTL.md`](SNAPCTL.md).
@@ -77,7 +77,7 @@ The workflow is a [DAG](https://en.wikipedia.org/wiki/Directed_acyclic_graph) wh
 
 #### Remote Targets
 
-Process and Publish nodes in the workflow can also target remote snap nodes via the 'target' key. The purpose of this is to allow offloading of resource intensive workflow steps from the node where data collection is occuring. Modifying the example above we have:
+Process and Publish nodes in the workflow can also target remote Snap nodes via the 'target' key. The purpose of this is to allow offloading of resource-intensive workflow steps from the node where data collection is occurring. Modifying the example above we have:
 
 ```yaml
 ---
@@ -108,9 +108,53 @@ Process and Publish nodes in the workflow can also target remote snap nodes via
 ```
 
-If a target is specified for a step in the workflow, that step will be executed on the remote instance specified by the ip:port target. Each node in the workflow is evaluated independently so a workflow can have any, all, or none of it's steps being done remotely (if `target` key is omitted, that step defaults to local). The ip and port target are the ip and port that has a running control-grpc server. These can be specified to snapd via the `control-listen-addr` and `control-listen-port` flags. The default is the same ip as the snap rest-api and port 8082.
+If a target is specified for a step in the workflow, that step will be executed on the remote instance specified by the ip:port target. Each node in the workflow is evaluated independently, so a workflow can have any, all, or none of its steps executed remotely (if the `target` key is omitted, that step defaults to local). The ip and port of the target are those of a running control-grpc server. These can be specified to snapd via the `control-listen-addr` and `control-listen-port` flags. The default is the same ip as the Snap rest-api and port 8082.
+
+An example JSON task that uses remote targets:
+```json
+{
+  "version": 1,
+  "schedule": {
+    "type": "simple",
+    "interval": "1s"
+  },
+  "max-failures": 10,
+  "workflow": {
+    "collect": {
+      "metrics": {
+        "/intel/mock/foo": {},
+        "/intel/mock/bar": {},
+        "/intel/mock/*/baz": {}
+      },
+      "config": {
+        "/intel/mock": {
+          "user": "root",
+          "password": "secret"
+        }
+      },
+      "process": [
+        {
+          "plugin_name": "passthru",
+          "target": "127.0.0.1:9999",
+          "process": null,
+          "publish": [
+            {
+              "plugin_name": "file",
+              "target": "127.0.0.1:9992",
+              "config": {
+                "file": "/tmp/snap_published_mock_file.log"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  }
+}
+```
 
-An example json task that uses remote targets can be found under [examples](https://github.com/intelsdi-x/snap/blob/master/examples/tasks/distributed-mock-file.json). More information about the architecture behind this can be found [here](DISTRIBUTED_WORKFLOW_ARCHITECTURE.md).
+More information about the architecture behind this can be found [here](DISTRIBUTED_WORKFLOW_ARCHITECTURE.md).
 
 #### collect
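To experiment with remote targets on a single machine, one option is to start a second snapd instance whose control-grpc server listens where the task's `target` entries point. This is a sketch under assumptions: the `--control-listen-addr` and `--control-listen-port` spellings follow from the flag names in the paragraph above, and a second instance on the same host would also need a non-default REST API port.

```
$ $SNAP_PATH/bin/snapd -l 1 -t 0 --control-listen-addr 127.0.0.1 --control-listen-port 9999
```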
diff --git a/examples/README.md b/examples/README.md
index 2de71298f..d046efb70 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -17,11 +17,16 @@ See the License for the specific language governing permissions and
 limitations under the License.
 -->
 
-# Examples
-Example package contains a collection of snap examples that demonstrate the snap libraries and usages.
+The [configs](./configs) folder contains examples of [the global configuration file](../docs/SNAPD_CONFIGURATION.md#snapd-configuration-file) that powers your plugins.
-* [configs](./configs) is an example of the global configuration file that powers your plugins -* [influxdb-grafana](./influxdb-grafana) Has an example that shows - publishing [psutil](https://github.com/intelsdi-x/snap-plugin-collector-psutil) (Processes and System Utilization) data into [Influxdb](https://github.com/intelsdi-x/snap-plugin-publisher-influxdb) and using Grafana to view the results -* [riemann](./riemann) has an example of [snap Riemann publisher plugin](https://github.com/intelsdi-x/snap-plugin-publisher-riemann) -* [tasks](./tasks) has JSON and YAML formatted execution requests for snap tasks \ No newline at end of file + For more examples of using Snap, checkout the examples in these repositories: + - [snap-plugin-collector-docker](https://github.com/intelsdi-x/snap-plugin-collector-docker), + - [snap-plugin-collector-ethtool](https://github.com/intelsdi-x/snap-plugin-collector-ethtool), + - [snap-plugin-collector-cpu](https://github.com/intelsdi-x/snap-plugin-collector-cpu), + - [snap-plugin-collector-disk](https://github.com/intelsdi-x/snap-plugin-collector-disk), + - [snap-plugin-collector-psutil](https://github.com/intelsdi-x/snap-plugin-collector-psutil), + - [snap-plugin-collector-meminfo](https://github.com/intelsdi-x/snap-plugin-collector-meminfo), + - [snap-plugin-processor-statistics](https://github.com/intelsdi-x/snap-plugin-processor-statistics), + - [snap-plugin-publisher-influxdb](https://github.com/intelsdi-x/snap-plugin-publisher-influxdb), + - [snap-plugin-publisher-graphite](https://github.com/intelsdi-x/snap-plugin-publisher-graphite), + - [snap-plugin-publisher-file](https://github.com/intelsdi-x/snap-plugin-publisher-file). diff --git a/examples/influxdb-grafana/README.md b/examples/influxdb-grafana/README.md deleted file mode 100644 index cc2c12f42..000000000 --- a/examples/influxdb-grafana/README.md +++ /dev/null @@ -1,49 +0,0 @@ - - -### How to run the example - -This example includes configuring and starting influxdb (a time series database) and grafana (a metrics dashboard). - -The example: - - Starts containers for Influxdb and Grafana (using Docker) - - Starts snapd - - Gets, loads and starts snap plugins - - Creates and starts a task that collects from psutil and publishes to InfluxDB - -Start the example by running `run-psutil.sh` - -![run-psutil.sh](http://i.giphy.com/d2Zhwlh8lMZM9nkQ.gif) - -### Requirements -- docker -- docker-compose -- netcat -- SNAP_PATH env var set to the snap build directory - -Note: The script also supports docker-machine but doesn't require it. - -### Issues/Warning - -- Make sure the time on your docker-machine vm is syncd with the time on your host - -- There is an unresolved issue with the 1.12.0-rc4-beta19 (build: 10258) Docker for Mac Beta that will throw an error ("unexpected EOF") while attempting to publish to the InfluxDB container. 
When using Mac OS X, it is suggested to use an InfluxDB daemon (`influxd`) or to utilize a virtual machine with a docker-supported Linux distribution - - - diff --git a/examples/influxdb-grafana/docker-compose.yml b/examples/influxdb-grafana/docker-compose.yml deleted file mode 100644 index a57c32e23..000000000 --- a/examples/influxdb-grafana/docker-compose.yml +++ /dev/null @@ -1,16 +0,0 @@ -grafana: - build: ./grafana/ - ports: - - "80:80" - - "3000:3000" - links: - - influxdb -influxdb: - build: ./influxdb/0.9/ - ports: - - "8086:8086" - - "8083:8083" - expose: - - "8090" - - "8099" - diff --git a/examples/influxdb-grafana/grafana/Dockerfile b/examples/influxdb-grafana/grafana/Dockerfile deleted file mode 100644 index e1e9e9115..000000000 --- a/examples/influxdb-grafana/grafana/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM debian:wheezy - -RUN apt-get update && apt-get -y install libfontconfig wget adduser openssl ca-certificates - -RUN wget http://grafanarel.s3.amazonaws.com/builds/grafana_latest_amd64.deb - -RUN dpkg -i grafana_latest_amd64.deb - -RUN apt-get install -y curl netcat - -EXPOSE 3000 - -VOLUME ["/var/lib/grafana"] -VOLUME ["/var/log/grafana"] -VOLUME ["/etc/grafana"] - -WORKDIR /usr/share/grafana - -ENTRYPOINT ["/usr/sbin/grafana-server", "--config", "/etc/grafana/grafana.ini"] - diff --git a/examples/influxdb-grafana/grafana/psutil.json b/examples/influxdb-grafana/grafana/psutil.json deleted file mode 100644 index ece591492..000000000 --- a/examples/influxdb-grafana/grafana/psutil.json +++ /dev/null @@ -1,543 +0,0 @@ -{ - "overwrite": true, - "dashboard": { - "id": null, - "title": "snap Dashboard", - "originalTitle": "snap Dashboard", - "tags": [], - "style": "dark", - "timezone": "browser", - "editable": true, - "hideControls": false, - "sharedCrosshair": false, - "rows": [ - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "influx", - "editable": true, - "error": false, - "fill": 1, - "grid": { - "leftLogBase": 1, - "leftMax": null, - "leftMin": null, - "rightLogBase": 1, - "rightMax": null, - "rightMin": null, - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "load1", - "dsType": "influxdb", - "function": "mean", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "groupByTags": [], - "measurement": "intel/psutil/load/load1", - "query": "SELECT mean(\"value\") FROM \"intel/psutil/load/load1\" WHERE $timeFilter GROUP BY time($interval) fill(null)", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "alias": "load5", - "dsType": "influxdb", - "function": "mean", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "groupByTags": [], - "measurement": 
"intel/psutil/load/load5", - "query": "SELECT mean(\"value\") FROM \"intel/psutil/load/load5\" WHERE $timeFilter GROUP BY time($interval) fill(null)", - "refId": "B", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [], - "target": "" - }, - { - "alias": "load15", - "dsType": "influxdb", - "function": "mean", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "groupByTags": [], - "measurement": "intel/psutil/load/load15", - "query": "SELECT mean(\"value\") FROM \"intel/psutil/load/load15\" WHERE $timeFilter GROUP BY time($interval) fill(null)", - "refId": "C", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [], - "target": "" - } - ], - "timeFrom": "5m", - "timeShift": null, - "title": "load", - "tooltip": { - "shared": true, - "value_type": "cumulative" - }, - "type": "graph", - "x-axis": true, - "y-axis": true, - "y_formats": [ - "short", - "short" - ] - } - ], - "title": "Row" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "influx", - "editable": true, - "error": false, - "fill": 1, - "grid": { - "leftLogBase": 1, - "leftMax": null, - "leftMin": null, - "rightLogBase": 1, - "rightMax": null, - "rightMin": null, - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 2, - "leftYAxisLabel": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "available", - "dsType": "influxdb", - "function": "mean", - "groupBy": [ - { - "type": "time", - "interval": "auto", - "params": [ - "auto" - ] - } - ], - "measurement": "intel/psutil/vm/available", - "query": "SELECT distinct(\"value\") FROM \"intel/psutil/vm/available\" WHERE $timeFilter GROUP BY time($interval)", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "type": "field", - "params": [ - "value" - ] - }, - { - "type": "distinct", - "params": [] - } - ] - ], - "tags": [] - } - ], - "timeFrom": "5m", - "timeShift": null, - "title": "Mem Avail", - "tooltip": { - "shared": true, - "value_type": "cumulative" - }, - "type": "graph", - "x-axis": true, - "y-axis": true, - "y_formats": [ - "bytes", - "short" - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "influx", - "editable": true, - "error": false, - "fill": 1, - "grid": { - "leftLogBase": 1, - "leftMax": null, - "leftMin": null, - "rightLogBase": 1, - "rightMax": null, - "rightMin": null, - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - 
"pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "alias": "used", - "dsType": "influxdb", - "function": "distinct", - "groupBy": [ - { - "interval": "auto", - "params": [ - "auto" - ], - "type": "time" - } - ], - "measurement": "intel/psutil/vm/used", - "query": "SELECT distinct(\"value\") FROM \"intel/psutil/vm/used\" WHERE $timeFilter GROUP BY time($interval)", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "distinct" - } - ] - ], - "tags": [] - } - ], - "timeFrom": "5m", - "timeShift": null, - "title": "Mem Used", - "tooltip": { - "shared": true, - "value_type": "cumulative" - }, - "type": "graph", - "x-axis": true, - "y-axis": true, - "y_formats": [ - "bytes", - "short" - ] - } - ], - "title": "New row" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "influx", - "editable": true, - "error": false, - "fill": 1, - "grid": { - "leftLogBase": 1, - "leftMax": null, - "leftMin": null, - "rightLogBase": 1, - "rightMax": null, - "rightMin": null, - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "dsType": "influxdb", - "fields": [ - { - "func": "mean", - "name": "value" - } - ], - "groupBy": [ - { - "type": "time", - "interval": "auto", - "params": [ - "auto" - ] - } - ], - "measurement": "intel/psutil/vm/free", - "query": "SELECT distinct(\"value\") FROM \"intel/psutil/vm/free\" WHERE $timeFilter GROUP BY time($interval)", - "refId": "A", - "resultFormat": "time_series", - "select": [ - [ - { - "type": "field", - "params": [ - "value" - ] - }, - { - "type": "distinct", - "params": [] - } - ] - ], - "tags": [] - } - ], - "timeFrom": "5m", - "timeShift": null, - "title": "Mem Free", - "tooltip": { - "shared": true, - "value_type": "cumulative" - }, - "type": "graph", - "x-axis": true, - "y-axis": true, - "y_formats": [ - "short", - "short" - ] - } - ], - "title": "New row" - } - ], - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "collapse": false, - "enable": true, - "notice": false, - "now": true, - "refresh_intervals": [ - "1s", - "2s", - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "status": "Stable", - "time_options": [ - "1m", - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ], - "type": "timepicker" - }, - "templating": { - "list": [] - }, - "annotations": { - "list": [] - }, - "refresh": "2s", - "schemaVersion": 8, - "version": 5, - "links": [] -} -} \ No newline at end of file diff --git a/examples/influxdb-grafana/influxdb/0.9/Dockerfile b/examples/influxdb-grafana/influxdb/0.9/Dockerfile deleted file mode 100644 index f0d1d30d2..000000000 --- a/examples/influxdb-grafana/influxdb/0.9/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM tutum/curl:trusty -MAINTAINER Feng Honglin - -# 
Install InfluxDB -ENV INFLUXDB_VERSION 0.9.4.1 -RUN curl -s -o /tmp/influxdb_latest_amd64.deb https://s3.amazonaws.com/influxdb/influxdb_${INFLUXDB_VERSION}_amd64.deb && \ - dpkg -i /tmp/influxdb_latest_amd64.deb && \ - rm /tmp/influxdb_latest_amd64.deb && \ - rm -rf /var/lib/apt/lists/* - -ADD types.db /usr/share/collectd/types.db -ADD config.toml /config/config.toml -ADD run.sh /run.sh -RUN chmod +x /*.sh - -ENV PRE_CREATE_DB **None** -ENV SSL_SUPPORT **False** -ENV SSL_CERT **None** - -# Admin server WebUI -EXPOSE 8083 - -# HTTP API -EXPOSE 8086 - -# Raft port (for clustering, don't expose publicly!) -#EXPOSE 8090 - -# Protobuf port (for clustering, don't expose publicly!) -#EXPOSE 8099 - -VOLUME ["/data"] - -CMD ["/run.sh"] diff --git a/examples/influxdb-grafana/influxdb/0.9/config.toml b/examples/influxdb-grafana/influxdb/0.9/config.toml deleted file mode 100644 index 3b7850723..000000000 --- a/examples/influxdb-grafana/influxdb/0.9/config.toml +++ /dev/null @@ -1,235 +0,0 @@ -### Welcome to the InfluxDB configuration file. - -# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com -# The data includes raft id (random 8 bytes), os, arch, version, and metadata. -# We don't track ip addresses of servers reporting. This is only used -# to track the number of instances running and the versions, which -# is very helpful for us. -# Change this option to true to disable reporting. -reporting-disabled = false - -### -### [meta] -### -### Controls the parameters for the Raft consensus group that stores metadata -### about the InfluxDB cluster. -### - -[meta] - dir = "/data/meta" - hostname = "localhost" - bind-address = ":8088" - retention-autocreate = true - election-timeout = "1s" - heartbeat-timeout = "1s" - leader-lease-timeout = "500ms" - commit-timeout = "50ms" - -### -### [data] -### -### Controls where the actual shard data for InfluxDB lives and how it is -### flushed from the WAL. "dir" may need to be changed to a suitable place -### for your system, but the WAL settings are an advanced configuration. The -### defaults should work for most systems. -### - -[data] - dir = "/data/db" - - # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't - # apply to any new shards created after upgrading to a version > 0.9.3. - max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB. - wal-flush-interval = "10m0s" # Maximum time data can sit in WAL before a flush. - wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed. - - # These are the WAL settings for the storage engine >= 0.9.3 - wal-dir = "/data/wal" - wal-enable-logging = true - - # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to - # flush to the index - # wal-ready-series-size = 25600 - - # Flush and compact a partition once this ratio of series are over the ready size - # wal-compaction-threshold = 0.6 - - # Force a flush and compaction if any series in a partition gets above this size in bytes - # wal-max-series-size = 2097152 - - # Force a flush of all series and full compaction if there have been no writes in this - # amount of time. This is useful for ensuring that shards that are cold for writes don't - # keep a bunch of data cached in memory and in the WAL. - # wal-flush-cold-interval = "10m" - - # Force a partition to flush its largest series if it reaches this approximate size in - # bytes. 
Remember there are 5 partitions so you'll need at least 5x this amount of memory. - # The more memory you have, the bigger this can be. - # wal-partition-size-threshold = 20971520 - -### -### [cluster] -### -### Controls non-Raft cluster behavior, which generally includes how data is -### shared across shards. -### -[cluster] - write-timeout = "5s" # The time within which a write operation must complete on the cluster. - shard-writer-timeout = "5s" # The time within which a shard must respond to write. - -### -### [retention] -### -### Controls the enforcement of retention policies for evicting old data. -### -[retention] - enabled = true - check-interval = "10m0s" - -### -### [admin] -### -### Controls the availability of the built-in, web-based admin interface. If HTTPS is -### enabled for the admin interface, HTTPS must also be enabled on the [http] service. -### -[admin] - enabled = true - bind-address = ":8083" - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - -### -### [http] -### -### Controls how the HTTP endpoints are configured. These are the primary -### mechanism for getting data into and out of InfluxDB. -### -[http] - enabled = true - bind-address = ":8086" - auth-enabled = false - log-enabled = true - write-tracing = false - pprof-enabled = false - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - -### -### [[graphite]] -### -### Controls one or many listeners for Graphite data. -### -[[graphite]] - enabled = false - bind-address = ":2003" - protocol = "tcp" - consistency-level = "one" - separator = "." - database = "graphitedb" - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. - - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - batch-size = 1000 - batch-timeout = "1s" - templates = [ - # filter + template - #"*.app env.service.resource.measurement", - - # filter + template + extra tag - #"stats.* .host.measurement* region=us-west,agent=sensu", - - # default template. Ignore the first graphite component "servers" - "instance.profile.measurement*" - ] - -### -### [collectd] -### -### Controls the listener for collectd data. -### -[collectd] - enabled = false - # bind-address = ":25826" - # database = "collectd" - # retention-policy = "" - # typesdb = "/usr/share/collectd/types.db" - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. - - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - -### -### [opentsdb] -### -### Controls the listener for OpenTSDB data. -### -[opentsdb] - enabled = false - # bind-address = ":4242" - # database = "opentsdb" - # retention-policy = "" - # consistency-level = "one" - -### -### [[udp]] -### -### Controls the listeners for InfluxDB line protocol data via UDP. -### - -[[udp]] - enabled = false - bind-address = ":4444" - database = "udpdb" - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. 
- - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - -### -### [monitoring] -### -### Send anonymous usage statistics to m.influxdb.com? -### -[monitoring] - enabled = false - write-interval = "24h" - -### -### [continuous_queries] -### -### Controls how continuous queries are run within InfluxDB. -### - -[continuous_queries] - log-enabled = true - enabled = true - recompute-previous-n = 2 - recompute-no-older-than = "10m0s" - compute-runs-per-interval = 10 - compute-no-more-than = "2m0s" - -### -### [hinted-handoff] -### -### Controls the hinted handoff feature, which allows nodes to temporarily -### store queued data when one node of a cluster is down for a short period -### of time. -### - -[hinted-handoff] - enabled = true - dir = "/data/hh" - max-size = 1073741824 - max-age = "168h" - retry-rate-limit = 0 - retry-interval = "1s" diff --git a/examples/influxdb-grafana/influxdb/0.9/run.sh b/examples/influxdb-grafana/influxdb/0.9/run.sh deleted file mode 100755 index 21ff8d8dd..000000000 --- a/examples/influxdb-grafana/influxdb/0.9/run.sh +++ /dev/null @@ -1,156 +0,0 @@ -#!/bin/bash - -#http://www.apache.org/licenses/LICENSE-2.0.txt -# -# -#Copyright 2015 Intel Corporation -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. - -set -m -CONFIG_FILE="/config/config.toml" -INFLUX_HOST="localhost" -INFLUX_API_PORT="8086" -API_URL="http://${INFLUX_HOST}:${INFLUX_API_PORT}" - -# Dynamically change the value of 'max-open-shards' to what 'ulimit -n' returns -sed -i "s/^max-open-shards.*/max-open-shards = $(ulimit -n)/" ${CONFIG_FILE} - -# Configure InfluxDB Cluster -if [ -n "${FORCE_HOSTNAME}" ]; then - if [ "${FORCE_HOSTNAME}" == "auto" ]; then - #set hostname with IPv4 eth0 - HOSTIPNAME=$(ip a show dev eth0 | grep inet | grep eth0 | sed -e 's/^.*inet.//g' -e 's/\/.*$//g') - /usr/bin/perl -p -i -e "s/^# hostname.*$/hostname = \"${HOSTIPNAME}\"/g" ${CONFIG_FILE} - else - /usr/bin/perl -p -i -e "s/^# hostname.*$/hostname = \"${FORCE_HOSTNAME}\"/g" ${CONFIG_FILE} - fi -fi - -# NOTE: 'seed-servers.' is nowhere to be found in config.toml, this cannot work anymore! NEED FOR REVIEW! -# if [ -n "${SEEDS}" ]; then -# SEEDS=$(eval SEEDS=$SEEDS ; echo $SEEDS | grep '^\".*\"$' || echo "\""$SEEDS"\"" | sed -e 's/, */", "/g') -# /usr/bin/perl -p -i -e "s/^# seed-servers.*$/seed-servers = [${SEEDS}]/g" ${CONFIG_FILE} -# fi - -if [ -n "${REPLI_FACTOR}" ]; then - /usr/bin/perl -p -i -e "s/replication-factor = 1/replication-factor = ${REPLI_FACTOR}/g" ${CONFIG_FILE} -fi - -if [ "${PRE_CREATE_DB}" == "**None**" ]; then - unset PRE_CREATE_DB -fi - -# NOTE: It seems this is not used anymore... 
-# -# if [ "${SSL_CERT}" == "**None**" ]; then -# unset SSL_CERT -# fi -# -# if [ "${SSL_SUPPORT}" == "**False**" ]; then -# unset SSL_SUPPORT -# fi - -# Add Graphite support -if [ -n "${GRAPHITE_DB}" ]; then - echo "GRAPHITE_DB: ${GRAPHITE_DB}" - sed -i -r -e "/^\[\[graphite\]\]/, /^$/ { s/false/true/; s/\"graphitedb\"/\"${GRAPHITE_DB}\"/g; }" ${CONFIG_FILE} -fi - -if [ -n "${GRAPHITE_BINDING}" ]; then - echo "GRAPHITE_BINDING: ${GRAPHITE_BINDING}" - sed -i -r -e "/^\[\[graphite\]\]/, /^$/ { s/\:2003/${GRAPHITE_BINDING}/; }" ${CONFIG_FILE} -fi - -if [ -n "${GRAPHITE_PROTOCOL}" ]; then - echo "GRAPHITE_PROTOCOL: ${GRAPHITE_PROTOCOL}" - sed -i -r -e "/^\[\[graphite\]\]/, /^$/ { s/tcp/${GRAPHITE_PROTOCOL}/; }" ${CONFIG_FILE} -fi - -if [ -n "${GRAPHITE_TEMPLATE}" ]; then - echo "GRAPHITE_TEMPLATE: ${GRAPHITE_TEMPLATE}" - sed -i -r -e "/^\[\[graphite\]\]/, /^$/ { s/instance\.profile\.measurement\*/${GRAPHITE_TEMPLATE}/; }" ${CONFIG_FILE} -fi - -# Add Collectd support -if [ -n "${COLLECTD_DB}" ]; then - echo "COLLECTD_DB: ${COLLECTD_DB}" - sed -i -r -e "/^\[collectd\]/, /^$/ { s/false/true/; s/( *)# *(.*)\"collectd\"/\1\2\"${COLLECTD_DB}\"/g;}" ${CONFIG_FILE} -fi -if [ -n "${COLLECTD_BINDING}" ]; then - echo "COLLECTD_BINDING: ${COLLECTD_BINDING}" - sed -i -r -e "/^\[collectd\]/, /^$/ { s/( *)# *(.*)\":25826\"/\1\2\"${COLLECTD_BINDING}\"/g;}" ${CONFIG_FILE} -fi -if [ -n "${COLLECTD_RETENTION_POLICY}" ]; then - echo "COLLECTD_RETENTION_POLICY: ${COLLECTD_RETENTION_POLICY}" - sed -i -r -e "/^\[collectd\]/, /^$/ { s/( *)# *(retention-policy.*)\"\"/\1\2\"${COLLECTD_RETENTION_POLICY}\"/g;}" ${CONFIG_FILE} -fi - -# Add UDP support -if [ -n "${UDP_DB}" ]; then - sed -i -r -e "/^\[\[udp\]\]/, /^$/ { s/false/true/; s/#//g; s/\"udpdb\"/\"${UDP_DB}\"/g; }" ${CONFIG_FILE} -fi -if [ -n "${UDP_PORT}" ]; then - sed -i -r -e "/^\[\[udp\]\]/, /^$/ { s/4444/${UDP_PORT}/; }" ${CONFIG_FILE} -fi - - -echo "influxdb configuration: " -cat ${CONFIG_FILE} -echo "=> Starting InfluxDB ..." -exec /opt/influxdb/influxd -config=${CONFIG_FILE} & - -# Pre create database on the initiation of the container -if [ -n "${PRE_CREATE_DB}" ]; then - echo "=> About to create the following database: ${PRE_CREATE_DB}" - if [ -f "/data/.pre_db_created" ]; then - echo "=> Database had been created before, skipping ..." - else - arr=$(echo ${PRE_CREATE_DB} | tr ";" "\n") - - #wait for the startup of influxdb - RET=1 - while [[ RET -ne 0 ]]; do - echo "=> Waiting for confirmation of InfluxDB service startup ..." - sleep 3 - curl -k ${API_URL}/ping 2> /dev/null - RET=$? 
- done - echo "" - - PASS=${INFLUXDB_INIT_PWD:-root} - if [ -n "${ADMIN_USER}" ]; then - echo "=> Creating admin user" - /opt/influxdb/influx -host=${INFLUX_HOST} -port=${INFLUX_API_PORT} -execute="CREATE USER ${ADMIN_USER} WITH PASSWORD '${PASS}' WITH ALL PRIVILEGES" - for x in $arr - do - echo "=> Creating database: ${x}" - /opt/influxdb/influx -host=${INFLUX_HOST} -port=${INFLUX_API_PORT} -username=${ADMIN_USER} -password="${PASS}" -execute="create database ${x}" - /opt/influxdb/influx -host=${INFLUX_HOST} -port=${INFLUX_API_PORT} -username=${ADMIN_USER} -password="${PASS}" -execute="grant all PRIVILEGES on ${x} to ${ADMIN_USER}" - done - echo "" - else - for x in $arr - do - echo "=> Creating database: ${x}" - /opt/influxdb/influx -host=${INFLUX_HOST} -port=${INFLUX_API_PORT} -execute="create database \"${x}\"" - done - fi - - touch "/data/.pre_db_created" - fi -else - echo "=> No database need to be pre-created" -fi - -fg diff --git a/examples/influxdb-grafana/influxdb/0.9/types.db b/examples/influxdb-grafana/influxdb/0.9/types.db deleted file mode 100644 index 38fb546c8..000000000 --- a/examples/influxdb-grafana/influxdb/0.9/types.db +++ /dev/null @@ -1,241 +0,0 @@ -absolute value:ABSOLUTE:0:U -apache_bytes value:DERIVE:0:U -apache_connections value:GAUGE:0:65535 -apache_idle_workers value:GAUGE:0:65535 -apache_requests value:DERIVE:0:U -apache_scoreboard value:GAUGE:0:65535 -ath_nodes value:GAUGE:0:65535 -ath_stat value:DERIVE:0:U -backends value:GAUGE:0:65535 -bitrate value:GAUGE:0:4294967295 -blocked_clients value:GAUGE:0:U -bytes value:GAUGE:0:U -cache_eviction value:DERIVE:0:U -cache_operation value:DERIVE:0:U -cache_ratio value:GAUGE:0:100 -cache_result value:DERIVE:0:U -cache_size value:GAUGE:0:U -ceph_bytes value:GAUGE:U:U -ceph_latency value:GAUGE:U:U -ceph_rate value:DERIVE:0:U -changes_since_last_save value:GAUGE:0:U -charge value:GAUGE:0:U -compression_ratio value:GAUGE:0:2 -compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U -connections value:DERIVE:0:U -conntrack value:GAUGE:0:4294967295 -contextswitch value:DERIVE:0:U -count value:GAUGE:0:U -counter value:COUNTER:U:U -cpufreq value:GAUGE:0:U -cpu value:DERIVE:0:U -current_connections value:GAUGE:0:U -current_sessions value:GAUGE:0:U -current value:GAUGE:U:U -delay value:GAUGE:-1000000:1000000 -derive value:DERIVE:0:U -df_complex value:GAUGE:0:U -df_inodes value:GAUGE:0:U -df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 -disk_latency read:GAUGE:0:U, write:GAUGE:0:U -disk_merged read:DERIVE:0:U, write:DERIVE:0:U -disk_octets read:DERIVE:0:U, write:DERIVE:0:U -disk_ops_complex value:DERIVE:0:U -disk_ops read:DERIVE:0:U, write:DERIVE:0:U -disk_time read:DERIVE:0:U, write:DERIVE:0:U -disk_io_time io_time:DERIVE:0:U, weighted_io_time:DERIVE:0:U -dns_answer value:DERIVE:0:U -dns_notify value:DERIVE:0:U -dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U -dns_opcode value:DERIVE:0:U -dns_qtype_cached value:GAUGE:0:4294967295 -dns_qtype value:DERIVE:0:U -dns_query value:DERIVE:0:U -dns_question value:DERIVE:0:U -dns_rcode value:DERIVE:0:U -dns_reject value:DERIVE:0:U -dns_request value:DERIVE:0:U -dns_resolver value:DERIVE:0:U -dns_response value:DERIVE:0:U -dns_transfer value:DERIVE:0:U -dns_update value:DERIVE:0:U -dns_zops value:DERIVE:0:U -drbd_resource value:DERIVE:0:U -duration seconds:GAUGE:0:U -email_check value:GAUGE:0:U -email_count value:GAUGE:0:U -email_size value:GAUGE:0:U -entropy value:GAUGE:0:4294967295 -expired_keys value:GAUGE:0:U -fanspeed value:GAUGE:0:U -file_handles value:GAUGE:0:U 
-file_size value:GAUGE:0:U -files value:GAUGE:0:U -flow value:GAUGE:0:U -fork_rate value:DERIVE:0:U -frequency_offset value:GAUGE:-1000000:1000000 -frequency value:GAUGE:0:U -fscache_stat value:DERIVE:0:U -gauge value:GAUGE:U:U -hash_collisions value:DERIVE:0:U -http_request_methods value:DERIVE:0:U -http_requests value:DERIVE:0:U -http_response_codes value:DERIVE:0:U -humidity value:GAUGE:0:100 -if_collisions value:DERIVE:0:U -if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U -if_errors rx:DERIVE:0:U, tx:DERIVE:0:U -if_multicast value:DERIVE:0:U -if_octets rx:DERIVE:0:U, tx:DERIVE:0:U -if_packets rx:DERIVE:0:U, tx:DERIVE:0:U -if_rx_errors value:DERIVE:0:U -if_rx_octets value:DERIVE:0:U -if_tx_errors value:DERIVE:0:U -if_tx_octets value:DERIVE:0:U -invocations value:DERIVE:0:U -io_octets rx:DERIVE:0:U, tx:DERIVE:0:U -io_packets rx:DERIVE:0:U, tx:DERIVE:0:U -ipt_bytes value:DERIVE:0:U -ipt_packets value:DERIVE:0:U -irq value:DERIVE:0:U -latency value:GAUGE:0:U -links value:GAUGE:0:U -load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 -md_disks value:GAUGE:0:U -memcached_command value:DERIVE:0:U -memcached_connections value:GAUGE:0:U -memcached_items value:GAUGE:0:U -memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U -memcached_ops value:DERIVE:0:U -memory value:GAUGE:0:281474976710656 -memory_lua value:GAUGE:0:281474976710656 -multimeter value:GAUGE:U:U -mutex_operations value:DERIVE:0:U -mysql_commands value:DERIVE:0:U -mysql_handler value:DERIVE:0:U -mysql_locks value:DERIVE:0:U -mysql_log_position value:DERIVE:0:U -mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U -mysql_bpool_pages value:GAUGE:0:U -mysql_bpool_bytes value:GAUGE:0:U -mysql_bpool_counters value:DERIVE:0:U -mysql_innodb_data value:DERIVE:0:U -mysql_innodb_dblwr value:DERIVE:0:U -mysql_innodb_log value:DERIVE:0:U -mysql_innodb_pages value:DERIVE:0:U -mysql_innodb_row_lock value:DERIVE:0:U -mysql_innodb_rows value:DERIVE:0:U -mysql_select value:DERIVE:0:U -mysql_sort value:DERIVE:0:U -nfs_procedure value:DERIVE:0:U -nginx_connections value:GAUGE:0:U -nginx_requests value:DERIVE:0:U -node_octets rx:DERIVE:0:U, tx:DERIVE:0:U -node_rssi value:GAUGE:0:255 -node_stat value:DERIVE:0:U -node_tx_rate value:GAUGE:0:127 -objects value:GAUGE:0:U -operations value:DERIVE:0:U -packets value:DERIVE:0:U -pending_operations value:GAUGE:0:U -percent value:GAUGE:0:100.1 -percent_bytes value:GAUGE:0:100.1 -percent_inodes value:GAUGE:0:100.1 -pf_counters value:DERIVE:0:U -pf_limits value:DERIVE:0:U -pf_source value:DERIVE:0:U -pf_states value:GAUGE:0:U -pf_state value:DERIVE:0:U -pg_blks value:DERIVE:0:U -pg_db_size value:GAUGE:0:U -pg_n_tup_c value:DERIVE:0:U -pg_n_tup_g value:GAUGE:0:U -pg_numbackends value:GAUGE:0:U -pg_scan value:DERIVE:0:U -pg_xact value:DERIVE:0:U -ping_droprate value:GAUGE:0:100 -ping_stddev value:GAUGE:0:65535 -ping value:GAUGE:0:65535 -players value:GAUGE:0:1000000 -power value:GAUGE:0:U -pressure value:GAUGE:0:U -protocol_counter value:DERIVE:0:U -ps_code value:GAUGE:0:9223372036854775807 -ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 -ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U -ps_data value:GAUGE:0:9223372036854775807 -ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U -ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U -ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U -ps_rss value:GAUGE:0:9223372036854775807 -ps_stacksize value:GAUGE:0:9223372036854775807 -ps_state value:GAUGE:0:65535 -ps_vm value:GAUGE:0:9223372036854775807 -pubsub value:GAUGE:0:U -queue_length value:GAUGE:0:U -records value:GAUGE:0:U 
-requests value:GAUGE:0:U -response_time value:GAUGE:0:U -response_code value:GAUGE:0:U -route_etx value:GAUGE:0:U -route_metric value:GAUGE:0:U -routes value:GAUGE:0:U -segments value:GAUGE:0:65535 -serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U -signal_noise value:GAUGE:U:0 -signal_power value:GAUGE:U:0 -signal_quality value:GAUGE:0:U -smart_poweron value:GAUGE:0:U -smart_powercycles value:GAUGE:0:U -smart_badsectors value:GAUGE:0:U -smart_temperature value:GAUGE:-300:300 -smart_attribute current:GAUGE:0:255, worst:GAUGE:0:255, threshold:GAUGE:0:255, pretty:GAUGE:0:U -snr value:GAUGE:0:U -spam_check value:GAUGE:0:U -spam_score value:GAUGE:U:U -spl value:GAUGE:U:U -swap_io value:DERIVE:0:U -swap value:GAUGE:0:1099511627776 -tcp_connections value:GAUGE:0:4294967295 -temperature value:GAUGE:U:U -threads value:GAUGE:0:U -time_dispersion value:GAUGE:-1000000:1000000 -timeleft value:GAUGE:0:U -time_offset value:GAUGE:-1000000:1000000 -total_bytes value:DERIVE:0:U -total_connections value:DERIVE:0:U -total_objects value:DERIVE:0:U -total_operations value:DERIVE:0:U -total_requests value:DERIVE:0:U -total_sessions value:DERIVE:0:U -total_threads value:DERIVE:0:U -total_time_in_ms value:DERIVE:0:U -total_values value:DERIVE:0:U -uptime value:GAUGE:0:4294967295 -users value:GAUGE:0:65535 -vcl value:GAUGE:0:65535 -vcpu value:GAUGE:0:U -virt_cpu_total value:DERIVE:0:U -virt_vcpu value:DERIVE:0:U -vmpage_action value:DERIVE:0:U -vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U -vmpage_io in:DERIVE:0:U, out:DERIVE:0:U -vmpage_number value:GAUGE:0:4294967295 -volatile_changes value:GAUGE:0:U -voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U -voltage value:GAUGE:U:U -vs_memory value:GAUGE:0:9223372036854775807 -vs_processes value:GAUGE:0:65535 -vs_threads value:GAUGE:0:65535 - -# -# Legacy types -# (required for the v5 upgrade target) -# -arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U -arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U -arc_l2_size value:GAUGE:0:U -arc_ratio value:GAUGE:0:U -arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U -mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U -mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U diff --git a/examples/influxdb-grafana/run-psutil.sh b/examples/influxdb-grafana/run-psutil.sh deleted file mode 100755 index f83bf73f8..000000000 --- a/examples/influxdb-grafana/run-psutil.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash - -#http://www.apache.org/licenses/LICENSE-2.0.txt -# -# -#Copyright 2015 Intel Corporation -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. 
- -# add some color to the output -red=`tput setaf 1` -green=`tput setaf 2` -reset=`tput sgr0` - -die () { - echo >&2 "${red} $@ ${reset}" - exit 1 -} - -# verify deps and the env -[ "${SNAP_PATH}x" != "x" ] || die "Error: SNAP_PATH must be set" -type docker-compose >/dev/null 2>&1 || die "Error: docker-compose is required" -type docker >/dev/null 2>&1 || die "Error: docker is required" -type netcat >/dev/null 2>&1 || die "Error: netcat is required" -if type docker-machine >/dev/null 2>&1; then - docker-machine active >/dev/null 2>&1 || die "Error: no active docker host found" - dm_ip=$(docker-machine ip $(docker-machine active)) || die - if [ "$dm_ip" = "" ]; then - die "Error: Unable to identify IP for your docker-machine. Make sure that it's started." - fi - echo ">>docker machine ip: ${dm_ip}" -else - dm_ip="127.0.0.1" -fi - -#start containers -docker-compose up -d - -echo -n "waiting for influxdb and grafana to start" - -# wait for influxdb to start up -while ! curl --silent -G "http://${dm_ip}:8086/query?u=admin&p=admin" --data-urlencode "q=SHOW DATABASES" 2>&1 > /dev/null ; do - sleep 1 - echo -n "." -done -echo "" - -#influxdb IP -influx_ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' influxdbgrafana_influxdb_1) -echo "influxdb ip: ${influx_ip}" - -# create snap database in influxdb -curl -G "http://${dm_ip}:8086/ping" -echo -n ">>deleting snap influx db (if it exists) => " -curl -G "http://${dm_ip}:8086/query?u=admin&p=admin" --data-urlencode "q=DROP DATABASE snap" -echo "" -echo -n "creating snap influx db => " -curl -G "http://${dm_ip}:8086/query?u=admin&p=admin" --data-urlencode "q=CREATE DATABASE snap" -echo "" - -# create influxdb datasource in grafana -echo -n "${green}adding influxdb datasource to grafana => ${reset}" -COOKIEJAR=$(mktemp) -curl -H 'Content-Type: application/json;charset=UTF-8' \ - --data-binary '{"user":"admin","email":"","password":"admin"}' \ - --cookie-jar "$COOKIEJAR" \ - "http://${dm_ip}:3000/login" - -curl --cookie "$COOKIEJAR" \ - -X POST \ - --silent \ - -H 'Content-Type: application/json;charset=UTF-8' \ - --data-binary "{\"name\":\"influx\",\"type\":\"influxdb\",\"url\":\"http://${influx_ip}:8086\",\"access\":\"proxy\",\"database\":\"snap\",\"user\":\"admin\",\"password\":\"admin\"}" \ - "http://${dm_ip}:3000/api/datasources" -echo "" - -dashboard=$(cat $SNAP_PATH/../examples/influxdb-grafana/grafana/psutil.json) -curl --cookie "$COOKIEJAR" \ - -X POST \ - --silent \ - -H 'Content-Type: application/json;charset=UTF-8' \ - --data "$dashboard" \ - "http://${dm_ip}:3000/api/dashboards/db" -echo "" - -echo "${green}getting and building snap-plugin-publisher-influxdb${reset}" -go get github.com/intelsdi-x/snap-plugin-publisher-influxdb -# try and build; If the build first fails try again also getting deps else stop with an error -(cd $SNAP_PATH/../../snap-plugin-publisher-influxdb && make all) || (cd $SNAP_PATH/../../snap-plugin-publisher-influxdb && make) || die "Error: failed to get and compile influxdb plugin" - -echo "${green}getting and building snap-plugin-collector-psutil${reset}" -go get github.com/intelsdi-x/snap-plugin-collector-psutil -# try and build; If the build first fails try again also getting deps else stop with an error -(cd $SNAP_PATH/../../snap-plugin-collector-psutil && make all) || (cd $SNAP_PATH/../../snap-plugin-collector-psutil && make) || die "Error: failed to get and compile psutil plugin" - -echo -n "${green}starting snapd${reset}" -$SNAP_PATH/bin/snapd --log-level 1 -t 0 > /tmp/snap.out 2>&1 & -echo "" - 
-sleep 3
-
-echo "${green}loading snap-plugin-publisher-influxdb${reset}"
-($SNAP_PATH/bin/snapctl plugin load $SNAP_PATH/../../snap-plugin-publisher-influxdb/build/rootfs/snap-plugin-publisher-influxdb) || die "Error: failed to load influxdb plugin"
-
-echo "${green}loading snap-plugin-collector-psutil${reset}"
-($SNAP_PATH/bin/snapctl plugin load $SNAP_PATH/../../snap-plugin-collector-psutil/build/rootfs/snap-plugin-collector-psutil) || die "Error: failed to load psutil plugin"
-
-echo -n "${green}adding task${reset}"
-TMPDIR=${TMPDIR:="/tmp"}
-TASK="${TMPDIR}/snap-task-$$.json"
-echo "$TASK"
-sed "s/INFLUXDB_IP/${dm_ip}/" $SNAP_PATH/../examples/tasks/psutil-influx.json > $TASK
-$SNAP_PATH/bin/snapctl task create -t $TASK
-
-echo "${green}"
-echo "Grafana Dashboard => http://${dm_ip}:3000/dashboard/db/snap-dashboard"
-echo "Influxdb UI => http://${dm_ip}:8083"
-echo ""
-echo "Press enter to start viewing the snap.log${reset}"
-read
-tail -f /tmp/snap.out
diff --git a/examples/riemann/Vagrantfile b/examples/riemann/Vagrantfile
deleted file mode 100644
index 518e4df7d..000000000
--- a/examples/riemann/Vagrantfile
+++ /dev/null
@@ -1,41 +0,0 @@
-#http://www.apache.org/licenses/LICENSE-2.0.txt
-#
-#
-#Copyright 2015 Intel Corporation
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-#Vagrant::Config.run do |config|
-Vagrant.configure("2") do |config|
-  config.vm.box = "chef/centos-7.0"
-  config.vm.provision :shell, :path => "./script/init.sh"
-  # config.vm.forward_port 4567, 4567 # dashboard
-  # config.vm.forward_port 5555, 5555, { :protocol => "udp"} # riemann server
-  # config.vm.forward_port 5556, 5556 # riemann server: Websocket
-
-  config.vm.network :forwarded_port, {:guest => 4567, :host => 4567, :id => "dashboard", :auto_correct => true}
-  config.vm.network :forwarded_port, {:guest => 5555, :host => 5555, :id => "riemann", :auto_correct => true, :protocol => "udp"}
-  config.vm.network :forwarded_port, {:guest => 5556, :host => 5556, :id => "riemann_ws", :auto_correct => true}
-  # config.vm.network :forwarded_port, {:guest => 1080, :host => 1080, :id => "mailcatcher", :auto_correct => true}
-
-
-  config.vm.provider "vmware_fusion" do |v|
-    v.vmx["memsize"] = "1024"
-    v.vmx["numvcpus"] = "2"
-  end
-
-  config.vm.synced_folder "../../", "/vagrant/go/src/github.com/intelsdi-x/snap"
-end
\ No newline at end of file
diff --git a/examples/riemann/script/init.sh b/examples/riemann/script/init.sh
deleted file mode 100755
index 52a0c15a6..000000000
--- a/examples/riemann/script/init.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#http://www.apache.org/licenses/LICENSE-2.0.txt
-#
-#
-#Copyright 2015 Intel Corporation
-#
-#Licensed under the Apache License, Version 2.0 (the "License");
-#you may not use this file except in compliance with the License.
-#You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-#Unless required by applicable law or agreed to in writing, software
-#distributed under the License is distributed on an "AS IS" BASIS,
-#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#See the License for the specific language governing permissions and
-#limitations under the License.
-
-# deps
-sudo yum -y install epel-release
-sudo yum -y install ruby gcc mysql-devel ruby-devel rubygems java-1.7.0-openjdk git hg
-sudo yum -y install golang
-sudo yum -y install rubygem-nokogiri
-
-# riemann server
-wget https://aphyr.com/riemann/riemann-0.2.9.tar.bz2
-tar xvfj riemann-0.2.9.tar.bz2
-sudo mv riemann-0.2.9 /usr/local/lib/riemann
-
-# riemann dash
-sudo gem install --no-ri --no-rdoc riemann-client riemann-tools riemann-dash
-
-# systemd
-sudo cp /vagrant/service/riemann.service /usr/lib/systemd/system/
-sudo cp /vagrant/service/riemann-dash.service /usr/lib/systemd/system/
-
-sudo systemctl enable riemann
-sudo systemctl enable riemann-dash
-
-sudo systemctl start riemann
-sudo systemctl start riemann-dash
-
-# gopath
-echo "export GOPATH=/vagrant/go" >> /home/vagrant/.bash_profile
-echo "export GOBIN=/vagrant/go/bin" >> /home/vagrant/.bash_profile
-echo 'export PATH=$PATH:$GOBIN' >> /home/vagrant/.bash_profile
-
-export GOPATH=/vagrant/go
-export GOBIN=/vagrant/go/bin
-export PATH=$PATH:$GOBIN
-go get github.com/tools/godep
-
-cd $GOPATH/src/github.com/intelsdi-x/snap
-scripts/deps.sh
-make
-
-echo 'PATH=$PATH:$GOPATH/src/github.com/intelsdi-x/snap/build/bin' >> /home/vagrant/.bash_profile
-export PATH=$PATH:$GOPATH/src/github.com/intelsdi-x/snap/build/bin
diff --git a/examples/riemann/service/config.rb b/examples/riemann/service/config.rb
deleted file mode 100644
index 3e7af33b7..000000000
--- a/examples/riemann/service/config.rb
+++ /dev/null
@@ -1,2 +0,0 @@
-set :port, 4567
-set :bind, "0.0.0.0"
\ No newline at end of file
diff --git a/examples/riemann/service/riemann-dash.service b/examples/riemann/service/riemann-dash.service
deleted file mode 100644
index 26e9180c4..000000000
--- a/examples/riemann/service/riemann-dash.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=The Riemann Dashboard
-Documentation=
-
-[Service]
-Type=simple
-ExecStart=/usr/local/bin/riemann-dash /vagrant/service/config.rb
-
-[Install]
-WantedBy=multi-user.target
\ No newline at end of file
diff --git a/examples/riemann/service/riemann.service b/examples/riemann/service/riemann.service
deleted file mode 100644
index 58c884d4d..000000000
--- a/examples/riemann/service/riemann.service
+++ /dev/null
@@ -1,10 +0,0 @@
-[Unit]
-Description=The Riemann Service
-Documentation=
-
-[Service]
-Type=simple
-ExecStart=/usr/local/lib/riemann/bin/riemann
-
-[Install]
-WantedBy=multi-user.target
diff --git a/examples/tasks/README.md b/examples/tasks/README.md
deleted file mode 100644
index a99c7903b..000000000
--- a/examples/tasks/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-
-A task describes the how, what, and when of a __snap__ job. A task is described in a task _manifest_, written in either JSON or YAML. For more information, see the [documentation for tasks](/docs/TASKS.md).
-
-# Examples in this folder
-
-- **mock-file.json/yaml**: a simple example of task structure in both JSON and YAML format.
-  - schedule
-    - interval (1s)
-  - collector
-    - mock
-  - processor
-    - passthru
-  - publisher
-    - file
-
-- **psutil-file.yaml**: another simple example of collecting statistics and publishing them to a file. This file includes in-line comments to help you get oriented with task structure.
-  - schedule
-    - interval (1s)
-  - collector
-    - psutil
-  - publisher
-    - file
-
-- **ceph-file.json**: collects numerous statistics from Ceph, a distributed storage system commonly used with OpenStack.
-  - schedule
-    - interval (1s)
-  - collector
-    - Ceph
-  - publisher
-    - file
-
-- **psutil-influx.json**: a more complex example that collects information from psutil and publishes it to an instance of InfluxDB running locally. See [influxdb-grafana](../influxdb-grafana/) for other files to get this running.
-  - schedule
-    - interval (1s)
-  - collector
-    - psutil
-  - publisher
-    - influxdb
diff --git a/examples/tasks/ceph-file.json b/examples/tasks/ceph-file.json
deleted file mode 100644
index 9a4481cae..000000000
--- a/examples/tasks/ceph-file.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
-  "version": 1,
-  "schedule": {
-    "type": "simple",
-    "interval": "1s"
-  },
-  "max-failures": 10,
-  "workflow": {
-    "collect": {
-      "metrics": {
-        "/intel/storage/ceph/mon.a/cluster/num_mon": {},
-        "/intel/storage/ceph/mon.a/cluster/num_osd": {},
-        "/intel/storage/ceph/mon.a/cluster/num_object": {},
-        "/intel/storage/ceph/mon.a/cluster/num_pg": {},
-        "/intel/storage/ceph/mon.a/cluster/osd_bytes_used": {},
-        "/intel/storage/ceph/mon.a/cluster/osd_bytes": {},
-        "/intel/storage/ceph/mon.b/cluster/osd_bytes_used": {},
-        "/intel/storage/ceph/mon.b/cluster/osd_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_latency/avgcount": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_latency/sum": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_queue_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_queue_max_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/journal_wr": {},
-        "/intel/storage/ceph/osd.0/filestore/op_queue_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/op_queue_max_bytes": {},
-        "/intel/storage/ceph/osd.0/filestore/queue_transaction_latency_avg/avgcount": {},
-        "/intel/storage/ceph/osd.0/filestore/queue_transaction_latency_avg/sum": {},
-        "/intel/storage/ceph/osd.0/osd/op": {},
-        "/intel/storage/ceph/osd.0/osd/op_in_bytes": {},
-        "/intel/storage/ceph/osd.0/osd/op_latency/avgcount": {},
-        "/intel/storage/ceph/osd.0/osd/op_latency/sum": {},
-        "/intel/storage/ceph/osd.0/osd/op_process_latency/avgcount": {},
-        "/intel/storage/ceph/osd.0/osd/op_process_latency/sum": {},
-        "/intel/storage/ceph/osd.0/osd/op_w_in_bytes": {},
-        "/intel/storage/ceph/osd.0/osd/op_w_latency/avgcount": {},
-        "/intel/storage/ceph/osd.0/osd/op_w_latency/sum": {},
-        "/intel/storage/ceph/osd.0/osd/op_w_process_latency/avgcount": {},
-        "/intel/storage/ceph/osd.0/osd/op_w_process_latency/sum": {},
-        "/intel/storage/ceph/osd.1/filestore/bytes": {},
-        "/intel/storage/ceph/osd.1/filestore/journal_bytes": {},
-        "/intel/storage/ceph/osd.1/filestore/journal_latency/avgcount": {},
-        "/intel/storage/ceph/mds.a/objecter/op_w": {},
-        "/intel/storage/ceph/mds.b/objecter/op_w": {}
-      },
-      "config": {
-        "/intel/storage/ceph": {
-          "user": "root",
-          "password": "secret"
-        }
-      },
-      "process": null,
-      "publish": [
-        {
-          "plugin_name": "file",
-          "plugin_version": 1,
-          "config": {
-            "file": "/tmp/snap_published_ceph_file.log"
-          }
-        }
-      ]
-    }
-  }
-}
-
diff --git
a/examples/tasks/distributed-mock-file.json b/examples/tasks/distributed-mock-file.json deleted file mode 100644 index 432aeb8f7..000000000 --- a/examples/tasks/distributed-mock-file.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "1s" - }, - "max-failures": 10, - "workflow": { - "collect": { - "metrics": { - "/intel/mock/foo": {}, - "/intel/mock/bar": {}, - "/intel/mock/*/baz": {} - }, - "config": { - "/intel/mock": { - "user": "root", - "password": "secret" - } - }, - "process": [ - { - "plugin_name": "passthru", - "target": "127.0.0.1:9999", - "process": null, - "publish": [ - { - "plugin_name": "file", - "target": "127.0.0.1:9992", - "config": { - "file": "/tmp/snap_published_mock_file.log" - } - } - ] - } - ] - } - } -} diff --git a/examples/tasks/mock-file.json b/examples/tasks/mock-file.json deleted file mode 100644 index e8463971b..000000000 --- a/examples/tasks/mock-file.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "1s" - }, - "max-failures": 10, - "workflow": { - "collect": { - "metrics": { - "/intel/mock/foo": {}, - "/intel/mock/bar": {}, - "/intel/mock/*/baz": {} - }, - "config": { - "/intel/mock": { - "name": "root", - "password": "secret" - } - }, - "process": [ - { - "plugin_name": "passthru", - "process": null, - "publish": [ - { - "plugin_name": "file", - "config": { - "file": "/tmp/snap_published_mock_file.log" - } - } - ] - } - ] - } - } -} diff --git a/examples/tasks/mock-file.yaml b/examples/tasks/mock-file.yaml deleted file mode 100644 index efce91720..000000000 --- a/examples/tasks/mock-file.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - version: 1 - schedule: - type: "simple" - interval: "1s" - max-failures: 10 - workflow: - collect: - metrics: - /intel/mock/foo: {} - /intel/mock/bar: {} - /intel/mock/*/baz: {} - config: - /intel/mock: - name: "root" - password: "secret" - process: - - - plugin_name: "passthru" - config: - debug: true - process: null - publish: - - - plugin_name: "file" - config: - file: "/tmp/snap_published_mock_file.log" - debug: true diff --git a/examples/tasks/mock-file_specific_instance.json b/examples/tasks/mock-file_specific_instance.json deleted file mode 100644 index 7f84297ec..000000000 --- a/examples/tasks/mock-file_specific_instance.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "2s" - }, - "workflow": { - "collect": { - "metrics": { - "/intel/mock/host0/baz": {}, - "/intel/mock/host1/baz": {} - }, - "config": { - "/intel/mock": { - "user": "root", - "password": "secret" - } - }, - "process": null, - "publish": [ - { - "plugin_name": "mock-file", - "config": { - "file": "/tmp/snap_published_mock_file.log" - } - } - ] - } - } -} diff --git a/examples/tasks/mock-file_tuple.json b/examples/tasks/mock-file_tuple.json deleted file mode 100644 index d5a81b9bf..000000000 --- a/examples/tasks/mock-file_tuple.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "1s" - }, - "workflow": { - "collect": { - "metrics": { - "/intel/mock/(host0;host1)/baz": {} - }, - "config": { - "/intel/mock": { - "user": "root", - "password": "secret" - } - }, - "process": null, - "publish": [ - { - "plugin_name": "mock-file", - "config": { - "file": "/tmp/snap_published_mock_file.log" - } - } - ] - } - } -} diff --git a/examples/tasks/mock_tagged-file.json b/examples/tasks/mock_tagged-file.json deleted file mode 100644 index 9a43baff0..000000000 
--- a/examples/tasks/mock_tagged-file.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "1s" - }, - "workflow": { - "collect": { - "metrics": { - "/intel/mock/foo": {}, - "/intel/mock/bar": {}, - "/intel/mock/*/baz": {} - }, - "config": { - "/intel/mock": { - "name": "root", - "password": "secret" - } - }, - "tags": { - "/intel/mock": { - "experiment": "1", - "os": "linux" - } - }, - - "process": [ - { - "plugin_name": "passthru", - "process": null, - "publish": [ - { - "plugin_name": "file", - "config": { - "file": "/tmp/snap_published_mock_file.log" - } - } - ] - } - ] - } - } -} diff --git a/examples/tasks/psutil-file.yaml b/examples/tasks/psutil-file.yaml deleted file mode 100644 index 098387411..000000000 --- a/examples/tasks/psutil-file.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - version: 1 - schedule: - type: "simple" - interval: "1s" - max-failures: 10 - workflow: - collect: - metrics: - /intel/psutil/load/load1: {} - /intel/psutil/load/load15: {} - /intel/psutil/load/load5: {} - /intel/psutil/vm/available: {} - /intel/psutil/vm/free: {} - /intel/psutil/vm/used: {} - config: - process: - - - plugin_name: "passthru" - process: null - publish: - - - plugin_name: "file" - config: - file: "/tmp/snap_published_demo_file.log" diff --git a/examples/tasks/psutil-file_no-processor.yaml b/examples/tasks/psutil-file_no-processor.yaml deleted file mode 100644 index a405bf0e3..000000000 --- a/examples/tasks/psutil-file_no-processor.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - version: 1 - schedule: - type: "simple" - interval: "1s" - max-failures: 10 - workflow: - collect: - metrics: - /intel/psutil/load/load1: {} - /intel/psutil/load/load15: {} - /intel/psutil/load/load5: {} - publish: - - - plugin_name: "file" - config: - file: "/tmp/snap_published_demo_file.log" diff --git a/examples/tasks/psutil-influx.json b/examples/tasks/psutil-influx.json deleted file mode 100644 index e39417858..000000000 --- a/examples/tasks/psutil-influx.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "version": 1, - "schedule": { - "type": "simple", - "interval": "1s" - }, - "max-failures": 10, - "workflow": { - "collect": { - "metrics": { - "/intel/psutil/load/load1": {}, - "/intel/psutil/load/load5": {}, - "/intel/psutil/load/load15": {}, - "/intel/psutil/vm/available": {}, - "/intel/psutil/vm/free": {}, - "/intel/psutil/vm/used": {} - }, - "process": null, - "publish": [ - { - "plugin_name": "influx", - "config": { - "host": "INFLUXDB_IP", - "port": 8086, - "database": "snap", - "user": "admin", - "password": "admin" - } - } - ] - } - } -}