diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..c384fa0b9f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,24 @@ +--- +name: Bug report +about: Create a report to help us improve + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/Makefile b/Makefile index af55e751d0..ccdc74cf48 100644 --- a/Makefile +++ b/Makefile @@ -308,7 +308,7 @@ services := api \ webhooks2tasks \ hacky-rest2tasks-ui \ rabbitmq \ - logs-collector \ + logs-forwarder \ logs-db \ logs-db-ui \ logs2logs-db \ @@ -620,10 +620,11 @@ minishift/login-docker-registry: openshift-lagoon-setup: # Only use the minishift provided oc if we don't have one yet (allows system engineers to use their own oc) if ! 
which oc; then eval $$(./local-dev/minishift/minishift --profile $(CI_BUILD_TAG) oc-env); fi; \ - oc -n default set env dc/router -e ROUTER_LOG_LEVEL=info -e ROUTER_SYSLOG_ADDRESS=192.168.99.1:5140; \ + oc -n default set env dc/router -e ROUTER_LOG_LEVEL=info -e ROUTER_SYSLOG_ADDRESS=router-logs.lagoon.svc:5140; \ oc new-project lagoon; \ oc adm pod-network make-projects-global lagoon; \ oc -n lagoon create serviceaccount openshiftbuilddeploy; \ + oc -n lagoon policy add-role-to-user admin -z openshiftbuilddeploy; \ oc -n lagoon create -f openshift-setup/clusterrole-openshiftbuilddeploy.yaml; \ oc -n lagoon adm policy add-cluster-role-to-user openshiftbuilddeploy -z openshiftbuilddeploy; \ oc -n lagoon create -f openshift-setup/shared-resource-viewer.yaml; \ @@ -631,7 +632,16 @@ openshift-lagoon-setup: oc -n lagoon create serviceaccount docker-host; \ oc -n lagoon adm policy add-scc-to-user privileged -z docker-host; \ oc -n lagoon policy add-role-to-user edit -z docker-host; \ - bash -c "oc process -n lagoon -f openshift-setup/docker-host.yaml | oc -n lagoon apply -f -"; \ + oc -n lagoon create serviceaccount logs-collector; \ + oc -n lagoon adm policy add-cluster-role-to-user cluster-reader -z logs-collector; \ + oc -n lagoon adm policy add-scc-to-user hostaccess -z logs-collector; \ + oc -n lagoon adm policy add-scc-to-user privileged -z logs-collector; \ + oc -n lagoon adm policy add-cluster-role-to-user daemonset-admin -z lagoon-deployer; \ + oc -n lagoon create serviceaccount lagoon-deployer; \ + oc -n lagoon policy add-role-to-user edit -z openshiftbuilddeploy; \ + oc -n lagoon create -f openshift-setup/clusterrole-daemonset-admin.yaml; \ + oc -n lagoon adm policy add-cluster-role-to-user daemonset-admin -z lagoon-deployer; \ + bash -c "oc process -n lagoon -f services/docker-host/docker-host.yaml | oc -n lagoon apply -f -"; \ echo -e "\n\nAll Setup, use this token as described in the Lagoon Install Documentation:" \ oc -n lagoon serviceaccounts get-token 
openshiftbuilddeploy @@ -641,7 +651,8 @@ openshift-lagoon-setup: .PHONY: openshift/configure-lagoon-local minishift/configure-lagoon-local: openshift-lagoon-setup eval $$(./local-dev/minishift/minishift --profile $(CI_BUILD_TAG) oc-env); \ - bash -c "oc process -n lagoon -p IMAGE=docker-registry.default.svc:5000/lagoon/docker-host:latest -p REPOSITORY_TO_UPDATE=lagoon -f openshift-setup/docker-host-minishift.yaml | oc -n lagoon apply -f -"; + bash -c "oc process -n lagoon -p SERVICE_IMAGE=172.30.1.1:5000/lagoon/docker-host:latest -p REPOSITORY_TO_UPDATE=lagoon -f services/docker-host/docker-host.yaml | oc -n lagoon apply -f -"; \ + oc -n default set env dc/router -e ROUTER_LOG_LEVEL=info -e ROUTER_SYSLOG_ADDRESS=192.168.99.1:5140; \ # Stop OpenShift Cluster .PHONY: minishift/stop diff --git a/docker-compose.yaml b/docker-compose.yaml index 9eaa9f1eae..0569121ac0 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -260,13 +260,13 @@ services: lagoon.type: elasticsearch lagoon.template: services/logs-db/.lagoon.yml lagoon.image: amazeeiolagoon/logs-db:${SAFE_BRANCH:-master} - logs-collector: - image: ${IMAGE_REPO:-lagoon}/logs-collector + logs-forwarder: + image: ${IMAGE_REPO:-lagoon}/logs-forwarder user: '111111111' labels: lagoon.type: custom - lagoon.template: services/logs-collector/.lagoon.yml - lagoon.image: amazeeiolagoon/logs-collector:${SAFE_BRANCH:-master} + lagoon.template: services/logs-forwarder/.lagoon.yml + lagoon.image: amazeeiolagoon/logs-forwarder:${SAFE_BRANCH:-master} logs-db-ui: image: ${IMAGE_REPO:-lagoon}/logs-db-ui user: '111111111' @@ -281,6 +281,7 @@ services: user: '111111111' ports: - "5140:5140/udp" + - "5044:5044" labels: lagoon.type: logstash lagoon.template: services/logs2logs-db/.lagoon.yml @@ -305,3 +306,9 @@ services: lagoon.type: custom lagoon.template: services/storage-calculator/.lagoon.yml lagoon.image: amazeeiolagoon/storage-calculator:${SAFE_BRANCH:-master} + logs-collector: + image: 
openshift/origin-logging-fluentd:v3.6.1 + labels: + lagoon.type: custom + lagoon.template: services/logs-collector/.lagoon.yml + lagoon.rollout: daemonset diff --git a/docs/administering_lagoon/create-project.gql b/docs/administering_lagoon/create-project.gql index 39705da1d7..bf2472c3c0 100644 --- a/docs/administering_lagoon/create-project.gql +++ b/docs/administering_lagoon/create-project.gql @@ -5,6 +5,7 @@ mutation { # see an example in /local-dev/api-data/api-data.sql) addCustomer(input: {name: "customer-name", private_key: "[fill me]"}) { name + id } # The OpenShift Cluster that Lagoon should use to deploy to. Yes Lagoon is not only capable to deploy into the OpenShift that @@ -14,10 +15,11 @@ mutation { # `token` - the token of the `lagoon` Service Account creted in this OpenShift (this is the same token that we also used during installation of Lagoon) addOpenshift(input: {name: "my-openshift", console_url:"[fill me]", token: "[fill me]"}) { name + id } # This is your git repository that should be deployed, it needs to contain a `.lagoon.yml` file so Lagoon knows what it should do. - addProject(input:{name: "first-project", customer:"customer-name", openshift: "my-openshift", git_url: "[fill me]"}) { + addProject(input:{name: "first-project", customer:[customer-id], openshift:[openshift-id], git_url: "[fill me]"}) { name customer { name diff --git a/docs/administering_lagoon/graphql_api.md b/docs/administering_lagoon/graphql_api.md index a434b461ff..ba8042d403 100644 --- a/docs/administering_lagoon/graphql_api.md +++ b/docs/administering_lagoon/graphql_api.md @@ -35,17 +35,106 @@ And press the Play button (or press CTRL+ENTER). If all went well, you should se In order for Lagoon to deploy a project there is an example graphql in `create-project.gql`, which will create three API Objects: -1. `project` This is your git repository that should be deployed, it needs to contain a `.lagoon.yml` file so Lagoon knows what it should do. +1. 
`customer` The customer of the project. Can be used for an actual customer (if you use Lagoon in a multi-customer setup), or just to group multiple projects together. `customer` will hold the SSH Private Key that Lagoon will use to clone the Git repository of the project (the private key needs to be in a single string, where new lines are replaced by `\n` - see an example in /local-dev/api-data/api-data.sql) 2. `openshift` The OpenShift Cluster that Lagoon should use to deploy to. Yes Lagoon is not only capable to deploy into the OpenShift that it is running itself, but actually to any OpenShift anywhere in the world. We need to know the following infos for this to work: 1. `name` - Unique identifier of the OpenShift 2. `console_url` - URL of the OpenShift console (without any `/console` suffix) 3. `token` - the token of the `lagoon` Service Account created in this OpenShift (this is the same token that we also used during installation of Lagoon) -3. `customer` The customer of the project. Can be used for an actual customer (if you use Lagoon in a multi-customer setup), or just to group multiple projects together. `customer` will hold the SSH Private Key that Lagoon will use to clone the Git repository of the project (the private key needs to be in a single string, where new lines are replaced by `\n` - see an example in /local-dev/api-data/api-data.sql) +3. `project` This is your git repository that should be deployed, it needs to contain a `.lagoon.yml` file so Lagoon knows what it should do. Just fill all the `[fill me]` you can find in the examples below, copy it into the GraphiQL Client, press play and if everything went well, you should get a response which shows you the name of the customer & openshift object and the full project object that just has been created. Congrats again 🎉! +#### Give Access to the Project + +In Lagoon the individual developers are authenticating themselves via their SSH Keys. 
Via their SSH Keys they have access to multiple things: + +1. The Lagoon API itself, where they can only see and edit projects they actually have access to +2. Remote Shell Access to containers that are running in projects they have access to +3. The Lagoon logging system, where a developer can find Request Logs, Container Logs, Lagoon Logs and many more. + +First we need to add a new SSH Public key to the API: + +``` +mutation addSSHKey { + addSshKey(input:{name:"[name]", keyValue:"[keyValue]", keyType:SSH_RSA}) { + id + } +} +``` + +- `name` - Your identifier for this SSH Key, can be any string +- `keyValue` - The actual SSH Public Key Value (without the type in front and no name at the end, so just something like `AAAAB3NzaC1yc2EAAAADAQ...3QjzIOtdQERGZuMsi0p`) +- `keyType` - The type of the key, there are currently two types supported by Lagoon: `SSH_RSA` and `SSH_ED25519` + +After we added the key we can give this key access to either a single project or a whole customer, while access to a whole customer means that this SSH key automatically has access to all projects that are assigned to this customer. + +``` +mutation addSshKeyToCustomer { + addSshKeyToCustomer(input:{customer:"[customer-name]", sshKey:"[sshKey-name]"}) { + id + } +} +``` + +or + +``` +mutation addSshKeyToProject { + addSshKeyToProject(input:{project:"[project-name]", sshKey:"[sshKey-name]"}) { + id + } +} +``` + +That's it, now this SSH key can create Tokens via SSH, access containers and more. + +Of course it is possible to add an SSH Key to multiple customers and projects, whatever you need. + +#### Add Notifications to the Project + +If you would like to know what exactly is going on during a deployment, we suggest configuring notifications for your project, they will provide: + +- Push messages +- Build start information +- Build success or failure messages +- Many more + +Like with the SSH Keys, we first add the Notification and then we connect the Notification to the Projects. 
As the Notifications can be quite different in the information they need, the notification types are built a bit more sophisticated and each Notification Type has its own mutation: + +``` +mutation addNotificationSlack { + addNotificationSlack(input:{name:"[name]", channel:"[channel]", webhook:"[webhook]"}) { + id + } +} +``` + +``` +mutation addNotificationRocketChat { + addNotificationRocketChat(input:{name:"[name]", channel:"[channel]", webhook:"[webhook]"}) { + id + } +} +``` + +- `name` - Is your own identifier for this Notification +- `channel` - Which channel should the message be sent to +- `webhook` - The URL of the webhook where messages should be sent, this is usually provided by the Chat System to you. + +After we create that we can now connect this notification to our project: + +``` +mutation addNotificationToProject { + addNotificationToProject(input:{notificationType: SLACK, project:"[project-name]", notificationName:"[notification-name]"}) { + id + } +} +``` + +Now for every deployment you should see messages appear in your defined channel. + ## Example GraphQL queries ### Add New OpenShift Target @@ -60,6 +149,7 @@ The OpenShift Cluster that Lagoon should use to deploy to. Yes, Lagoon is not on
Can be used for an actual customer (if you use Lago mutation { addCustomer(input: {name: "[fill me]", private_key: "[fill me]"}) { name + id } } ``` @@ -82,13 +173,15 @@ This is your git repository that should be deployed, it needs to contain a `.lag ``` mutation { - addProject(input:{name: "first-project", customer:"customer-name", openshift: "my-openshift", git_url: "[fill me]"}) { + addProject(input:{name: "first-project", customer:[customer-id], openshift:[openshift-id], git_url: "[fill me]"}) { name customer { name + id } openshift { name + id } git_url, active_systems_deploy, @@ -101,14 +194,13 @@ mutation { ### List Projects and Customers +This is a good command to see an overview of all Projects, OpenShifts and Customers that exist within our Lagoon. + ``` query whatIsThereAlready{ allProjects { name git_url - notifications { - ...slack - } } allOpenshifts { name @@ -119,8 +211,89 @@ query whatIsThereAlready{ id } } +``` + +### Single Project + +If you want to get an in depth look into a single project, this query has been proven quite good: + +``` +query singleProject { + projectByName(name: "[projectname]") { + id + branches + git_url + pullrequests + production_environment + notifications(type: SLACK) { + ... on NotificationSlack { + name + channel + webhook + id + } + } + environments { + name + deploy_type + environment_type + } + openshift { + id + } + customer { + id + name + sshKeys { + id + name + } + } + } +} +``` + +### Project by Git URL + +Don't remember how a project was called, but know the Git URL? Search no longer, there is a GraphQL Query for that: + +``` +query projectByGitUrl{ + projectByGitUrl(gitUrl: "git@server.com:org/repo.git") { + name + } +} +``` + + +### Update Objects + +The Lagoon GraphQL API can not only display Objects and create Objects, it also has the capability to update existing Objects, all of this happens in full GraphQL best practices manner. 
+ +Update the branches to deploy within a project: +``` +mutation editProjectBranches { + updateProject(input:{id:109, patch:{branches:"^(prod|stage|dev|update)$"}}) { + id + } +} +``` -fragment slack on NotificationSlack { - name +Update the production Environment within a project (Important: Needs a redeploy in order for all changes to be reflected in the containers): +``` +mutation editProjectProductionEnvironment { + updateProject(input:{id:109, patch:{production_environment:"master"}}) { + id + } } ``` + +You can also combine multiple changes at once: + +``` +mutation editProjectProductionEnvironmentAndBranches { + updateProject(input:{id:109, patch:{production_environment:"master", branches:"^(prod|stage|dev|update)$"}}) { + id + } +} +``` \ No newline at end of file diff --git a/docs/administering_lagoon/install.md b/docs/administering_lagoon/install.md index 2216047abb..470466839f 100644 --- a/docs/administering_lagoon/install.md +++ b/docs/administering_lagoon/install.md @@ -35,9 +35,9 @@ In this example we create the Service Account `lagoon` in the OpenShift Project In order to use a local Lagoon to deploy itself on an OpenShift, we need a subset of Lagoon running locally. We need to tech this local Lagoon how to connect to the OpenShift: -1. Edit `lagoon` inside local-dev/api-data/api-data.sql, in the `INSERT INTO openshift` section: - 1. `[replace me with OpenShift console URL]` - The URL to the OpenShift Console, without `console` at the end. - 2. `[replace me with OpenShift Token]` - The token of the lagoon service account that was shown to you during `make openshift-lagoon-setup` +1. Edit `lagoon` inside local-dev/api-data/api-data.sql, in the `Lagoon Kickstart Objects` section: + 1. `[REPLACE ME WITH OPENSHIFT URL]` - The URL to the OpenShift Console, without `console` at the end. + 2. `[REPLACE ME WITH OPENSHIFT LAGOON SERVICEACCOUTN TOKEN]` - The token of the lagoon service account that was shown to you during `make openshift-lagoon-setup` 2. 
Build required Images and start services: diff --git a/docs/create-project.gql b/docs/create-project.gql deleted file mode 100644 index 109b4295df..0000000000 --- a/docs/create-project.gql +++ /dev/null @@ -1,35 +0,0 @@ -mutation { - # The customer of the project. Can be used for an actual customer (if you use Lagoon in a multi-customer setup), - # or just to group multiple projects together. `customer` will hold the SSH Private Key that Lagoon will use to clone - # the Git repository of the project (the private key needs to be in a single string, where new lines are replaced by `\n` - # see an example in /local-dev/api-data/api-data.sql) - addCustomer(input: {name: "customer-name", private_key: "[fill me]"}) { - name - } - - # The OpenShift Cluster that Lagoon should use to deploy to. Yes Lagoon is not only capable to deploy into the OpenShift that - # it is running itself, but actually to any OpenShift anywhere in the world. We need to know the following infos for this to work: - # `name` - Unique identifier of the OpenShift - # `console_url` - URL of the OpenShift console (without any `/console` suffix) - # `token` - the token of the `lagoon` Service Account created in this OpenShift (this is the same token that we also used during installation of Lagoon) - addOpenshift(input: {name: "my-openshift", console_url:"[fill me]", token: "[fill me]"}) { - name - } - - # This is your git repository that should be deployed, it needs to contain a `.lagoon.yml` file so Lagoon knows what it should do. 
- addProject(input:{name: "first-project", customer:"customer-name", openshift: "my-openshift", git_url: "[fill me]"}) { - name - customer { - name - } - openshift { - name - } - git_url, - active_systems_deploy, - active_systems_promote, - active_systems_remove, - branches, - pullrequests - } -} diff --git a/docs/developing_lagoon/contributing.md b/docs/developing_lagoon/contributing.md index 7efdf75f8b..87ccd831eb 100644 --- a/docs/developing_lagoon/contributing.md +++ b/docs/developing_lagoon/contributing.md @@ -18,7 +18,7 @@ We're always interested in fixing issues, therefore issue reports are very welco Cool! Just create an [issue](https://github.com/amazeeio/lagoon/issues) and we're happy to look over it. We can't guarantee though that it will be implemented. But we are always interested in hearing ideas where we can bring Lagoon to. -Another good way is also to talk to us via Slack about your idea. [Join today](https://slack.amazee.io/) in the channel #lagoon. +Another good way is also to talk to us via RocketChat about your idea. [Join today](https://amazeeio.rocket.chat/) in the channel #lagoon. ## I wrote some code diff --git a/docs/index.md b/docs/index.md index 7912f83f8c..d121b5b833 100644 --- a/docs/index.md +++ b/docs/index.md @@ -15,7 +15,7 @@ Lagoon solves what developers are dreaming about: A system that allows developer ## Help? -Questions? Ideas? Meet the maintainers and contributors: `#lagoon` in amazee.io Slack [https://slack.amazee.io]() +Questions? Ideas? Meet the maintainers and contributors: `#lagoon` in amazee.io RocketChat [https://amazeeio.rocket.chat]() ## A couple of things about Lagoon 1. Lagoon is based on microservices. A whole deployment and build workflow is very complex; not only do we have multiple sources (like Github, Bitbucket, Gitlab, etc.), multiple OpenShift servers and multiple notification systems (Slack, Rocketchat, etc.), but each deployment is unique and can take from seconds to hours. 
So it's built with flexibility and robustness in mind. Having microservices that all communicate through a messaging system (RabbitMQ) allows us to scale individual services up and down, survive down times of individual services and also to try out new parts of Lagoon in production without affecting others. diff --git a/docs/using_lagoon/lagoon_yml.md b/docs/using_lagoon/lagoon_yml.md index 9712d0c801..aca60d2c6e 100644 --- a/docs/using_lagoon/lagoon_yml.md +++ b/docs/using_lagoon/lagoon_yml.md @@ -19,15 +19,31 @@ tasks: name: env variables command: env service: cli + - run: + name: IF no Drupal installed drush si with no email sending + command: | + if [[ $(drush core-status bootstrap --pipe) == "" ]]; then + # no drupal installed, we install drupal from scratch + drush -y si + else + # drupal already installed, do nothing + echo "drupal already installed" + fi + service: cli + shell: bash - run: name: drush cim command: drush -y cim service: cli + shell: bash - run: name: drush cr command: drush -y cr service: cli +routes: + insecure: Redirect + environments: master: routes: @@ -41,12 +57,33 @@ environments: schedule: "H * * * *" # this will run the cron once per Hour command: drush cron service: cli - + staging: + cronjobs: + - name: drush cron + schedule: "H * * * *" # this will run the cron once per Hour + command: drush cron + service: cli ``` +## General Settings +### `docker-compose-yaml` +Tells the build script which docker-compose yaml file should be used in order to learn which services and containers should be deployed. This defaults to `docker-compose.yml` but could be used for a specific lagoon docker-compose yaml file if you need something like that. + +## `routes.insecure` +This allows you to define the behaviour of the automatic creates routes (NOT the custom routes per environment, see below for them). You can define: +* `Allow` simply sets up both routes for http and https (this is the default). 
+* `Redirect` will redirect any http requests to https +* `None` will mean a route for http will _not_ be created, and no redirect ## Tasks -### `post_rollout` -Here you can specify tasks which need to run against your project after a successful build and deploy. Common uses are to run `drush updb`, `drush cim`, or clear various caches. + +There are different types of tasks you can define, they differ in when exactly they are executed in a build flow: + +### `post_rollout.[i].run` +Here you can specify tasks which need to run against your project, _after_: +- all Images have been successfully built +- all Containers are updated with the new Images +- all Containers are running and have passed their readiness checks +Common uses are to run `drush updb`, `drush cim`, or clear various caches. * `name` - The name is an arbitrary label for making it easier to identify each task in the logs @@ -54,32 +91,54 @@ Here you can specify tasks which need to run against your succes - Here you specify what command should run. These are run in the WORKDIR of each container, for Lagoon images this is `/app`, keep this in mind if you need to `cd` into a specific location to run your task. * `service` - The service which to run the task in. If following our drupal-example, this will be the CLI container, as it has all your site code, files, and a connection to the DB. Typically you do not need to change this. +* `shell` + - Which shell should be used to run the task in. By default `sh` is used, but if the container also has other shells (like `bash`), you can define it here. This is useful if you want to run some small if/else bash scripts within the post-rollouts. ## Environments -Environment names match your deployed branches. -### `route` +Environment names match your deployed branches or pull requests, it allows you to have a different config for each environment, in our example it will apply to the `master` and `staging` environment. 
+### `environments.[name].routes` In the route section we identify the domain names which the environment will respond to. It is typical to only have an environment with routes specified for your production environment. All environments receive a generated route, but sometimes there is a need for a non-production environment to have it's own domain name, you can specify it here, and then add that domain with your DNS provider as a CNAME to the generated route name (these routes publish in deploy messages). The first element after the environment is the target service, `nginx` in our example. This is how we identify which service incoming requests will be sent to. -The simplest route is the `domain.com` example above. If you do not need SSL on your route, just add the domain, commit, and you are done. +The simplest route is the `domain.com` example above. This will assume that you want a Let's Encrypt certificate for your route and no redirect from https to http. -In the `www.domain.com` example, we see two more options: +In the `"www.domain.com"` example, we see two more options (also see the `:` at the end of the route and that the route is wrapped in `"`, that's important!): -* `tls-acme: 'true'` tells Lagoon to issue a Let's Encrypt certificate for that route. +* `tls-acme: 'true'` tells Lagoon to issue a Let's Encrypt certificate for that route, this is the default. If you don't like a Let's Encrypt set this to `tls-acme: 'false'` * `Insecure` can be set to `None`, `Allow` or `Redirect`. - * `Allow` simply sets up both routes for http and https + * `Allow` simply sets up both routes for http and https (this is the default). 
* `Redirect` will redirect any http requests to https * `None` will mean a route for http will _not_ be created, and no redirect will take place -### `cronjobs` +### `environments.[name].cronjobs` As most of the time it is not desireable to run the same cronjobs across all environments, you must explicitely define which jobs you want to run for each environment. * `name:` * Just a friendly name for identifying what the cronjob will do * `schedule:` - * The schedule at which to execute the cronjob. This follows the standard convention of cron. If you're not sure about the syntax [Crontab Generator](https://crontab-generator.org/) can help. You can alternatively specify `H` for the minute, and your cronjob will run once per hour at a random minute (the same minute each hour). + * The schedule at which to execute the cronjob. This follows the standard convention of cron. If you're not sure about the syntax [Crontab Generator](https://crontab-generator.org/) can help. + * You can specify `H` for the minute, and your cronjob will run once per hour at a random minute (the same minute each hour), or `H/15` to run it every 15 mins but with a random offset from the hour (like `6,21,36,51`) + * You can specify `H` for the hour, and your cronjob will run once per day at a random hour (the same hour every day) * `command:` * The command to execute. Like the tasks, this executes in the WORKDIR of the service, for Lagoon images this is `/app` * `service:` * Which service of your project to run the command in. For most projects this is the `cli` service. + +### `environments.[name].types` +The Lagoon Build processes checks the `lagoon.type` label from the `docker-compose.yml` file in order to learn what type of service should be deployed. 
+ +Sometime though you would like to override the type just for a single environment and not for all of them, like if you want a mariadb-galera high availability database for your production environment called `master`: + +`service-name: service-type` +- `service-name` - is the name of the service from `docker-compose.yml` you would like to override +- `service-type` - the type of the service you would like the service to override to. + +Example: + +``` +environments: + master: + types: + mariadb: mariadb-galera +``` \ No newline at end of file diff --git a/images/mariadb/maxscale.sql b/images/mariadb/maxscale.sql index 234c910ab1..60c956b0c9 100644 --- a/images/mariadb/maxscale.sql +++ b/images/mariadb/maxscale.sql @@ -7,5 +7,6 @@ GRANT SELECT ON mysql.tables_priv TO 'maxscale'@'%'; GRANT SHOW DATABASES ON *.* TO 'maxscale'@'%'; GRANT REPLICATION SLAVE ON *.* to 'maxscale'@'%'; GRANT REPLICATION CLIENT ON *.* to 'maxscale'@'%'; +GRANT SUPER ON *.* TO 'maxscale'@'%'; FLUSH PRIVILEGES; diff --git a/images/mariadb/my.cnf b/images/mariadb/my.cnf index e438bbe879..d9a1a3cad2 100644 --- a/images/mariadb/my.cnf +++ b/images/mariadb/my.cnf @@ -9,8 +9,8 @@ socket = /run/mysqld/mysqld.sock [mysqld] port = 3306 socket = /run/mysqld/mysqld.sock -character_set_server = utf8 -collation_server = utf8_bin +character_set_server = utf8mb4 +collation_server = utf8mb4_bin expire_logs_days = 10 ignore_db_dirs=backup innodb_buffer_pool_size = 256M diff --git a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh index 9700035d28..b20c347757 100755 --- a/images/oc-build-deploy-dind/build-deploy-docker-compose.sh +++ b/images/oc-build-deploy-dind/build-deploy-docker-compose.sh @@ -378,25 +378,25 @@ do # Some Templates need additonal Parameters, like where persistent storage can be found. 
TEMPLATE_PARAMETERS=() - PERSISTENT_STORAGE_PATH=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent false) + PERSISTENT_STORAGE_CLASS=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent\\.class false) + if [ ! $PERSISTENT_STORAGE_CLASS == "false" ]; then + TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_CLASS="${PERSISTENT_STORAGE_CLASS}") + fi + + PERSISTENT_STORAGE_SIZE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent\\.size false) + if [ ! $PERSISTENT_STORAGE_SIZE == "false" ]; then + TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_SIZE="${PERSISTENT_STORAGE_SIZE}") + fi + + PERSISTENT_STORAGE_PATH=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent false) if [ ! $PERSISTENT_STORAGE_PATH == "false" ]; then TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_PATH="${PERSISTENT_STORAGE_PATH}") - PERSISTENT_STORAGE_CLASS=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent\\.class false) - if [ ! $PERSISTENT_STORAGE_CLASS == "false" ]; then - TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_CLASS="${PERSISTENT_STORAGE_CLASS}") - fi - PERSISTENT_STORAGE_NAME=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent\\.name false) if [ ! $PERSISTENT_STORAGE_NAME == "false" ]; then TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_NAME="${PERSISTENT_STORAGE_NAME}") fi - - PERSISTENT_STORAGE_SIZE=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.persistent\\.size false) - if [ ! 
$PERSISTENT_STORAGE_SIZE == "false" ]; then - TEMPLATE_PARAMETERS+=(-p PERSISTENT_STORAGE_SIZE="${PERSISTENT_STORAGE_SIZE}") - fi fi DEPLOYMENT_STRATEGY=$(cat $DOCKER_COMPOSE_YAML | shyaml get-value services.$COMPOSE_SERVICE.labels.lagoon\\.deployment\\.strategy false) @@ -427,7 +427,7 @@ do CRONJOB_SCHEDULE=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.cronjobs.$CRONJOB_COUNTER.schedule) # Convert the Cronjob Schedule for additional features and better spread - CRONJOB_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "$CRONJOB_SCHEDULE") + CRONJOB_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "$CRONJOB_SCHEDULE") CRONJOB_COMMAND=$(cat .lagoon.yml | shyaml get-value environments.${BRANCH//./\\.}.cronjobs.$CRONJOB_COUNTER.command) CRONJOBS_ARRAY+=("${CRONJOB_SCHEDULE} ${CRONJOB_COMMAND}") @@ -450,7 +450,7 @@ do CRONJOB_SCHEDULE=$(cat ${SERVICE_CRONJOB_FILE} | shyaml get-value $CRONJOB_COUNTER.schedule) # Convert the Cronjob Schedule for additional features and better spread - CRONJOB_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "$CRONJOB_SCHEDULE") + CRONJOB_SCHEDULE=$( /oc-build-deploy/scripts/convert-crontab.sh "${OPENSHIFT_PROJECT}" "$CRONJOB_SCHEDULE") CRONJOB_COMMAND=$(cat ${SERVICE_CRONJOB_FILE} | shyaml get-value $CRONJOB_COUNTER.command) CRONJOBS_ARRAY+=("${CRONJOB_SCHEDULE} ${CRONJOB_COMMAND}") @@ -522,6 +522,17 @@ do STATEFULSET="${SERVICE_NAME}" . /oc-build-deploy/scripts/exec-monitor-statefulset.sh + elif [ $SERVICE_ROLLOUT_TYPE == "statefulset" ]; then + + STATEFULSET="${SERVICE_NAME}" + . /oc-build-deploy/scripts/exec-monitor-statefulset.sh + + elif [ $SERVICE_ROLLOUT_TYPE == "deamonset" ]; then + + DAEMONSET="${SERVICE_NAME}" + . /oc-build-deploy/scripts/exec-monitor-deamonset.sh + + elif [ ! $SERVICE_ROLLOUT_TYPE == "false" ]; then . 
/oc-build-deploy/scripts/exec-monitor-deploy.sh fi @@ -562,4 +573,3 @@ do let COUNTER=COUNTER+1 done - diff --git a/images/oc-build-deploy-dind/build-deploy.sh b/images/oc-build-deploy-dind/build-deploy.sh index 24a8f1b8db..6a692af563 100755 --- a/images/oc-build-deploy-dind/build-deploy.sh +++ b/images/oc-build-deploy-dind/build-deploy.sh @@ -20,6 +20,10 @@ fi LAGOON_GIT_SHA=`git rev-parse HEAD` +if [[ -n "$SUBFOLDER" ]]; then + cd $SUBFOLDER +fi + if [ ! -f .lagoon.yml ]; then echo "no .lagoon.yml file found"; exit 1; fi diff --git a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml index c6c8ec3636..8b41306ffd 100644 --- a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/deployment.yml @@ -40,6 +40,12 @@ parameters: - name: CRONJOBS description: Oneliner of Cronjobs value: "" + - name: PERSISTENT_STORAGE_SIZE + description: Size of the Storage to request + value: "5Gi" + - name: PERSISTENT_STORAGE_CLASS + description: Name of the Storage Class to use + value: "" objects: - apiVersion: v1 kind: DeploymentConfig diff --git a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/pvc.yml b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/pvc.yml index e1fbfc8012..41f936e87d 100644 --- a/images/oc-build-deploy-dind/openshift-templates/elasticsearch/pvc.yml +++ b/images/oc-build-deploy-dind/openshift-templates/elasticsearch/pvc.yml @@ -34,6 +34,12 @@ parameters: - name: DEPLOYMENT_STRATEGY description: Strategy of Deploymentconfig value: "Rolling" + - name: PERSISTENT_STORAGE_CLASS + description: Name of the Storage Class to use + value: "" + - name: PERSISTENT_STORAGE_SIZE + description: Size of the Storage to request + value: "5Gi" objects: - apiVersion: v1 kind: PersistentVolumeClaim @@ -42,6 +48,8 @@ objects: spec: accessModes: - ReadWriteOnce + 
storageClassName: "${PERSISTENT_STORAGE_CLASS}" resources: requests: - storage: 5Gi + storage: ${PERSISTENT_STORAGE_SIZE} + diff --git a/images/oc-build-deploy-dind/openshift-templates/mariadb/deployment.yml b/images/oc-build-deploy-dind/openshift-templates/mariadb/deployment.yml index 774a47aaed..fb8d254890 100644 --- a/images/oc-build-deploy-dind/openshift-templates/mariadb/deployment.yml +++ b/images/oc-build-deploy-dind/openshift-templates/mariadb/deployment.yml @@ -40,6 +40,12 @@ parameters: - name: CRONJOBS description: Oneline of Cronjobs value: "" + - name: PERSISTENT_STORAGE_SIZE + description: Size of the Storage to request + value: "5Gi" + - name: PERSISTENT_STORAGE_CLASS + description: Name of the Storage Class to use + value: "" objects: - apiVersion: v1 kind: DeploymentConfig @@ -100,3 +106,5 @@ objects: cpu: 10m memory: 10Mi test: false + triggers: + - type: ConfigChange diff --git a/images/oc-build-deploy-dind/openshift-templates/mariadb/pvc.yml b/images/oc-build-deploy-dind/openshift-templates/mariadb/pvc.yml index d4f4a0eb4d..b5ade28755 100644 --- a/images/oc-build-deploy-dind/openshift-templates/mariadb/pvc.yml +++ b/images/oc-build-deploy-dind/openshift-templates/mariadb/pvc.yml @@ -34,6 +34,12 @@ parameters: - name: DEPLOYMENT_STRATEGY description: Strategy of Deploymentconfig value: "Rolling" + - name: PERSISTENT_STORAGE_SIZE + description: Size of the Storage to request + value: "5Gi" + - name: PERSISTENT_STORAGE_CLASS + description: Name of the Storage Class to use + value: "" objects: - apiVersion: v1 kind: PersistentVolumeClaim @@ -42,6 +48,7 @@ objects: spec: accessModes: - ReadWriteOnce + storageClassName: "${PERSISTENT_STORAGE_CLASS}" resources: requests: - storage: 5Gi \ No newline at end of file + storage: ${PERSISTENT_STORAGE_SIZE} diff --git a/images/oc-build-deploy-dind/scripts/convert-crontab.sh b/images/oc-build-deploy-dind/scripts/convert-crontab.sh index 3f11f2aa1a..ca88a0aae3 100755 --- 
a/images/oc-build-deploy-dind/scripts/convert-crontab.sh +++ b/images/oc-build-deploy-dind/scripts/convert-crontab.sh @@ -7,6 +7,9 @@ function join { local IFS="$1"; shift; echo "$*"; } index=0 +# Seed is used to generate the "random" numbers +SEED=$(echo "$1" | cksum | cut -f 1 -d " ") + while read piece do @@ -14,7 +17,7 @@ do if [ "$index" = "0" ]; then if [[ $piece =~ ^H$ ]]; then # If just an H is defined, we generate a random minute - MINUTES=$((0 + RANDOM % 59)) + MINUTES=$((SEED % 59)) elif [[ $piece =~ ^(H|\*)\/([0-5]?[0-9])$ ]]; then # A Minute like H/15 or (*/15 for backwards compatibility) is defined, create a list of minutes with a random start @@ -22,7 +25,7 @@ do STEP=${BASH_REMATCH[2]} # Generate a random start within the given step to prevent that all cronjobs start at the same time # but still incorporate the wished step - COUNTER=$((0 + RANDOM % $STEP)) + COUNTER=$((SEED % $STEP)) MINUTES_ARRAY=() while [ $COUNTER -lt 60 ]; do MINUTES_ARRAY+=($COUNTER) @@ -45,7 +48,7 @@ do elif [ "$index" = "1" ]; then if [[ $piece =~ ^H$ ]]; then # If just an H is defined, we generate a random hour - HOURS=$((0 + RANDOM % 23)) + HOURS=$((SEED % 23)) else HOURS=$piece fi @@ -65,6 +68,6 @@ do #increment index index=$((index+1)) -done < <(echo $1 | tr " " "\n") +done < <(echo $2 | tr " " "\n") echo "${MINUTES} ${HOURS} ${DAYS} ${MONTHS} ${DAY_WEEK}" \ No newline at end of file diff --git a/images/oc-build-deploy-dind/scripts/exec-monitor-daemonset.sh b/images/oc-build-deploy-dind/scripts/exec-monitor-daemonset.sh new file mode 100755 index 0000000000..73835c00e4 --- /dev/null +++ b/images/oc-build-deploy-dind/scripts/exec-monitor-daemonset.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +DESIRED_NUMBER=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get daemonset "${DAEMONSET}" -o=go-template --template='{{.status.desiredNumberScheduled}}') +MAX_WAIT_SECONDS=600 +END=$((SECONDS+$MAX_WAIT_SECONDS)) + +while true; do + if [[ $SECONDS -gt $END ]]; then + echo "Daemonset 
'${DAEMONSET}' was not fully scaled within $MAX_WAIT_SECONDS seconds" + exit 1 + fi + + NUMBER_READY=$(oc --insecure-skip-tls-verify -n ${OPENSHIFT_PROJECT} get daemonset "${DAEMONSET}" -o=go-template --template='{{.status.numberReady}}') + if [[ $NUMBER_READY == $DESIRED_NUMBER ]]; then + echo "Daemonset '${DAEMONSET}' ready: $NUMBER_READY of $DESIRED_NUMBER ready" + break + else + echo "Daemonset '${DAEMONSET}' not ready yet: $NUMBER_READY of $DESIRED_NUMBER ready, waiting..." + fi + + sleep 10 +done diff --git a/images/varnish-drupal/drupal.vcl b/images/varnish-drupal/drupal.vcl index 9b9d38e69a..a7698ddf04 100644 --- a/images/varnish-drupal/drupal.vcl +++ b/images/varnish-drupal/drupal.vcl @@ -95,7 +95,7 @@ sub vcl_recv { } # Only allow BAN requests from IP addresses in the 'purge' ACL. - if (req.method == "BAN") { + if (req.method == "BAN" || req.method == "URIBAN" || req.method == "PURGE") { # Only allow BAN from defined ACL if (!client.ip ~ purge) { return (synth(403, "Your IP is not allowed.")); @@ -104,25 +104,27 @@ sub vcl_recv { # Only allows BAN if the Host Header has the style of with "${SERVICE_NAME:-varnish}:8080" or "${SERVICE_NAME:-varnish}". # Such a request is only possible from within the Docker network, as a request from external goes trough the Kubernetes Router and for that needs a proper Host Header if (!req.http.host ~ "^${SERVICE_NAME:-varnish}(:\d+)?$") { - return (synth(403, "BAN only allowed from within own network.")); + return (synth(403, "Only allowed from within own network.")); } - # Logic for the ban, using the Cache-Tags header. - if (req.http.Cache-Tags) { - ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags); - } - else { - return (synth(403, "Cache-Tags header missing.")); + if (req.method == "BAN") { + # Logic for the ban, using the Cache-Tags header. + if (req.http.Cache-Tags) { + ban("obj.http.Cache-Tags ~ " + req.http.Cache-Tags); + # Throw a synthetic page so the request won't go to the backend. 
+ return (synth(200, "Ban added.")); + } + else { + return (synth(403, "Cache-Tags header missing.")); + } } - # Throw a synthetic page so the request won't go to the backend. - return (synth(200, "Ban added.")); - } + if (req.method == "URIBAN" || req.method == "PURGE") { + ban("req.url ~ " + req.url); + # Throw a synthetic page so the request won't go to the backend. + return (synth(200, "Ban added.")); + } - if (req.method == "URIBAN") { - ban("req.http.host == " + req.http.host + " && req.url == " + req.url); - # Throw a synthetic page so the request won't go to the backend. - return (synth(200, "Ban added.")); } # Non-RFC2616 or CONNECT which is weird, we pipe that diff --git a/images/varnish/Dockerfile b/images/varnish/Dockerfile index bc58d236fe..2fd4b9f56b 100644 --- a/images/varnish/Dockerfile +++ b/images/varnish/Dockerfile @@ -33,9 +33,10 @@ EXPOSE 8080 # tells the local development environment on which port we are running ENV LAGOON_LOCALDEV_HTTP_PORT=8080 -# Make http_resp_hdr_len and http_resp_size configurable (see in docker-entrypoint) -ENV HTTP_RESP_HDR_LEN=8k -ENV HTTP_RESP_SIZE=32k +ENV HTTP_RESP_HDR_LEN=8k \ + HTTP_RESP_SIZE=32k \ + NUKE_LIMIT=150 \ + CACHE_SIZE=100M ENTRYPOINT ["/sbin/tini", "--", "/lagoon/entrypoints.sh"] -CMD [" /varnish-start.sh"] +CMD ["/varnish-start.sh"] diff --git a/images/varnish/docker-entrypoint b/images/varnish/docker-entrypoint index b536657b77..5d0aee6097 100644 --- a/images/varnish/docker-entrypoint +++ b/images/varnish/docker-entrypoint @@ -1,5 +1,3 @@ #!/bin/sh ep /etc/varnish/* - -exec /usr/sbin/varnishd -a :8080 -T :6082 -F -f /etc/varnish/default.vcl -S /etc/varnish/secret -p http_resp_hdr_len=$HTTP_RESP_HDR_LEN -p http_resp_size=$HTTP_RESP_SIZE diff --git a/images/varnish/varnish-start.sh b/images/varnish/varnish-start.sh old mode 100644 new mode 100755 index 9b44ef02c0..8b6341ea20 --- a/images/varnish/varnish-start.sh +++ b/images/varnish/varnish-start.sh @@ -1,2 +1,2 @@ #!/bin/sh -/usr/sbin/varnishd -a 
:8080 -T :6082 -F -f /etc/varnish/default.vcl -S /etc/varnish/secret -p http_resp_hdr_len=$HTTP_RESP_HDR_LEN -p http_resp_size=$HTTP_RESP_SIZE +/usr/sbin/varnishd -a :8080 -T :6082 -F -f /etc/varnish/default.vcl -S /etc/varnish/secret -p http_resp_hdr_len=$HTTP_RESP_HDR_LEN -p http_resp_size=$HTTP_RESP_SIZE -p nuke_limit=$NUKE_LIMIT -s malloc,$CACHE_SIZE diff --git a/local-dev/api-data/api-data.gql b/local-dev/api-data/api-data.gql index e293515638..a0e5778967 100644 --- a/local-dev/api-data/api-data.gql +++ b/local-dev/api-data/api-data.gql @@ -415,6 +415,30 @@ mutation FillAPI { id } + CiFeaturesSubfolder: addProject( + input: { + id: 17 + name: "ci-features-subfolder" + customer: 3 + openshift: 2 + git_url: "ssh://git@192.168.99.1:2222/git/features-subfolder.git" + production_environment: "master" + subfolder:"subfolder1/subfolder2" + } + ) { + id + } + + CiFeaturesSubfolderRocketChat: addNotificationToProject( + input: { + project: "ci-features-subfolder" + notificationType: ROCKETCHAT + notificationName: "amazeeio--lagoon-local-ci" + } + ) { + id + } + CiElasticsearch: addProject( input: { id: 14 diff --git a/local-dev/git/Dockerfile b/local-dev/git/Dockerfile index 944da0fe2f..3e34250827 100644 --- a/local-dev/git/Dockerfile +++ b/local-dev/git/Dockerfile @@ -24,7 +24,8 @@ RUN mkdir -m 700 /git/.ssh && \ git --bare init /git/drupal-postgres.git && \ git --bare init /git/nginx.git && \ git --bare init /git/features.git && \ - git --bare init /git/elasticsearch.git + git --bare init /git/features-subfolder.git && \ + git --bare init /git/elasticsearch.git USER root diff --git a/node-packages/commons/src/api.js b/node-packages/commons/src/api.js index 6f544c1c1a..e02ad05cba 100644 --- a/node-packages/commons/src/api.js +++ b/node-packages/commons/src/api.js @@ -67,6 +67,7 @@ async function getProjectsByGitUrl(gitUrl: string): Promise { token project_user router_pattern + project_pattern } } } @@ -223,11 +224,13 @@ const getOpenShiftInfoForProject = (project: 
string): Promise => token project_user router_pattern + project_pattern } customer { private_key } git_url + subfolder production_environment } } diff --git a/openshift-setup/clusterrole-daemonset-admin.yaml b/openshift-setup/clusterrole-daemonset-admin.yaml new file mode 100644 index 0000000000..4c739ee834 --- /dev/null +++ b/openshift-setup/clusterrole-daemonset-admin.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: daemonset-admin +rules: +- apiGroups: + - extensions + attributeRestrictions: null + resources: + - daemonsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/openshift-setup/docker-host-minishift.yaml b/openshift-setup/docker-host-minishift.yaml deleted file mode 100644 index 769f33840d..0000000000 --- a/openshift-setup/docker-host-minishift.yaml +++ /dev/null @@ -1,98 +0,0 @@ -apiVersion: v1 -kind: Template -metadata: - creationTimestamp: null - name: lagoon-docker-host -parameters: - - name: IMAGE - description: Image that should be used - value: docker.io/amazeeiolagoon/master-docker-host - - name: REPOSITORY_TO_UPDATE - description: Repository that should be updated by the cronjob - value: amazeeio -objects: -- apiVersion: v1 - kind: DeploymentConfig - metadata: - labels: - app: docker-host - name: docker-host - spec: - replicas: 1 - selector: - app: docker-host - deploymentconfig: docker-host - strategy: - type: Recreate - template: - metadata: - labels: - app: docker-host - deploymentconfig: docker-host - spec: - containers: - - image: ${IMAGE} - imagePullPolicy: Always - livenessProbe: - failureThreshold: 3 - periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 2375 - timeoutSeconds: 1 - name: docker-host - env: - - name: DOCKER_HOST - value: localhost - - name: REPOSITORY_TO_UPDATE - value: ${REPOSITORY_TO_UPDATE} - ports: - - containerPort: 2375 - protocol: TCP - readinessProbe: - failureThreshold: 3 - 
periodSeconds: 10 - successThreshold: 1 - tcpSocket: - port: 2375 - timeoutSeconds: 1 - resources: {} - securityContext: - privileged: true - volumeMounts: - - mountPath: /var/lib/docker - name: docker-lib - restartPolicy: Always - serviceAccount: docker-host - serviceAccountName: docker-host - volumes: - - name: docker-lib - emptyDir: {} - test: false - triggers: - - type: ConfigChange - - imageChangeParams: - automatic: true - containerNames: - - docker-host - from: - kind: ImageStreamTag - name: docker-host:latest - type: ImageChange -- apiVersion: v1 - kind: Service - metadata: - creationTimestamp: null - labels: - app: docker-host - name: docker-host - spec: - ports: - - port: 2375 - protocol: TCP - targetPort: 2375 - selector: - app: docker-host - deploymentconfig: docker-host - status: - loadBalancer: {} \ No newline at end of file diff --git a/services/api-db/docker-entrypoint-initdb.d/00-setup.sql b/services/api-db/docker-entrypoint-initdb.d/00-setup.sql index f521184380..f93259c8f7 100644 --- a/services/api-db/docker-entrypoint-initdb.d/00-setup.sql +++ b/services/api-db/docker-entrypoint-initdb.d/00-setup.sql @@ -25,6 +25,7 @@ CREATE TABLE IF NOT EXISTS openshift ( token varchar(1000), router_pattern varchar(300), project_user varchar(100), + project_pattern varchar(300), ssh_host varchar(300), ssh_port varchar(50), created timestamp DEFAULT CURRENT_TIMESTAMP @@ -50,6 +51,7 @@ CREATE TABLE IF NOT EXISTS project ( name varchar(100) UNIQUE, customer int REFERENCES customer (id), git_url varchar(300), + subfolder varchar(300), active_systems_deploy varchar(300), active_systems_promote varchar(300), active_systems_remove varchar(300), @@ -331,6 +333,44 @@ CREATE OR REPLACE PROCEDURE END; $$ +CREATE OR REPLACE PROCEDURE + add_project_pattern_to_openshift() + + BEGIN + + IF NOT EXISTS( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE table_name = 'openshift' + AND table_schema = 'infrastructure' + AND column_name = 'project_pattern' + ) THEN + ALTER 
TABLE `openshift` ADD `project_pattern` varchar(300); + + END IF; + + END; +$$ + +CREATE OR REPLACE PROCEDURE + add_subfolder_to_project() + + BEGIN + + IF NOT EXISTS( + SELECT NULL + FROM INFORMATION_SCHEMA.COLUMNS + WHERE table_name = 'project' + AND table_schema = 'infrastructure' + AND column_name = 'subfolder' + ) THEN + ALTER TABLE `project` ADD `subfolder` varchar(300); + + END IF; + + END; +$$ + DELIMITER ; CALL add_production_environment_to_project; @@ -343,3 +383,5 @@ CALL add_autoidle_to_project; CALL add_enum_rocketchat_to_type_in_project_notification(); CALL add_deleted_to_environment; CALL add_storagecalc_to_project(); +CALL add_project_pattern_to_openshift(); +CALL add_subfolder_to_project(); diff --git a/services/api-db/docker-entrypoint-initdb.d/01-procedures.sql b/services/api-db/docker-entrypoint-initdb.d/01-procedures.sql index ac07c6b0c5..76f65d2cd7 100644 --- a/services/api-db/docker-entrypoint-initdb.d/01-procedures.sql +++ b/services/api-db/docker-entrypoint-initdb.d/01-procedures.sql @@ -9,6 +9,7 @@ CREATE OR REPLACE PROCEDURE IN name varchar(100), IN customer int, IN git_url varchar(300), + IN subfolder varchar(300), IN openshift int, IN active_systems_deploy varchar(300), IN active_systems_promote varchar(300), @@ -40,6 +41,7 @@ CREATE OR REPLACE PROCEDURE name, customer, git_url, + subfolder, active_systems_deploy, active_systems_promote, active_systems_remove, @@ -55,6 +57,7 @@ CREATE OR REPLACE PROCEDURE name, c.id, git_url, + subfolder, active_systems_deploy, active_systems_promote, active_systems_remove, @@ -337,6 +340,7 @@ CREATE OR REPLACE PROCEDURE IN p_token varchar(1000), IN p_router_pattern varchar(300), IN p_project_user varchar(100), + IN p_project_pattern varchar(300), IN p_ssh_host varchar(300), IN p_ssh_port varchar(50) ) @@ -354,6 +358,7 @@ CREATE OR REPLACE PROCEDURE token, router_pattern, project_user, + project_pattern, ssh_host, ssh_port ) VALUES ( @@ -363,6 +368,7 @@ CREATE OR REPLACE PROCEDURE p_token, 
p_router_pattern, p_project_user, + p_project_pattern, p_ssh_host, p_ssh_port ); diff --git a/services/api/src/dao/openshift.js b/services/api/src/dao/openshift.js index 58bd2aa3d8..f29e5b84e3 100644 --- a/services/api/src/dao/openshift.js +++ b/services/api/src/dao/openshift.js @@ -37,6 +37,7 @@ const addOpenshift = ({ sqlClient }) => async (cred, input) => { ${input.token ? ':token' : 'NULL'}, ${input.router_pattern ? ':router_pattern' : 'NULL'}, ${input.project_user ? ':project_user' : 'NULL'}, + ${input.project_pattern? ':project_pattern' : 'NULL'}, ${input.ssh_host ? ':ssh_host' : 'NULL'}, ${input.ssh_port ? ':ssh_port' : 'NULL'} ); diff --git a/services/api/src/dao/project.js b/services/api/src/dao/project.js index 9378703118..76901490c2 100644 --- a/services/api/src/dao/project.js +++ b/services/api/src/dao/project.js @@ -153,6 +153,7 @@ const addProject = ({ sqlClient }) => async (cred, input) => { :name, :customer, :git_url, + ${input.subfolder ? ':subfolder' : 'NULL'}, :openshift, ${ input.active_systems_deploy diff --git a/services/api/src/schema.js b/services/api/src/schema.js index 9d4e944c28..834171061e 100644 --- a/services/api/src/schema.js +++ b/services/api/src/schema.js @@ -51,6 +51,7 @@ const typeDefs = ` token: String router_pattern: String project_user: String + project_pattern: String ssh_host: String ssh_port: String created: String @@ -83,6 +84,7 @@ const typeDefs = ` name: String customer: Customer git_url: String + subfolder: String notifications(type: NotificationType): [Notification] active_systems_deploy: String active_systems_promote: String @@ -169,6 +171,7 @@ const typeDefs = ` name: String! customer: Int! git_url: String! + subfolder: String openshift: Int! 
active_systems_deploy: String active_systems_promote: String @@ -208,6 +211,7 @@ const typeDefs = ` token: String router_pattern: String project_user: String + project_pattern: String ssh_host: String ssh_port: String } @@ -280,6 +284,7 @@ const typeDefs = ` name: String customer: Int git_url: String + subfolder: String active_systems_deploy: String active_systems_remove: String branches: String @@ -313,6 +318,7 @@ const typeDefs = ` token: String router_pattern: String project_user: String + project_pattern: String ssh_host: String ssh_port: String } diff --git a/services/auto-idler/.lagoon.yml b/services/auto-idler/.lagoon.yml index 9bf632fef6..fb799daa6b 100644 --- a/services/auto-idler/.lagoon.yml +++ b/services/auto-idler/.lagoon.yml @@ -160,4 +160,9 @@ objects: secretKeyRef: name: jwtsecret key: JWTSECRET + - name: LOGSDB_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + key: LOGSDB_ADMIN_PASSWORD + name: logs-db-admin-password restartPolicy: OnFailure \ No newline at end of file diff --git a/services/auto-idler/idle-services.sh b/services/auto-idler/idle-services.sh index 7488166ae1..c16bc73e0b 100755 --- a/services/auto-idler/idle-services.sh +++ b/services/auto-idler/idle-services.sh @@ -1,5 +1,8 @@ #!/bin/bash +# make sure we stop if we fail +set -eo pipefail + # Create an JWT Admin Token to talk to the API API_ADMIN_JWT_TOKEN=$(./create_jwt.sh) BEARER="Authorization: bearer $API_ADMIN_JWT_TOKEN" @@ -25,7 +28,7 @@ query=$(set -e -o pipefail; echo $GRAPHQL | sed 's/"/\\"/g' | sed 's/\\n/\\\\n/g DEVELOPMENT_ENVIRONMENTS=$(set -e -o pipefail; curl -s -XPOST -H 'Content-Type: application/json' -H "$BEARER" api:3000/graphql -d "{\"query\": \"$query\"}") # Load all hits of all environments of the last hour -ALL_ENVIRONMENT_HITS=$(curl -s -XGET "http://logs-db:9200/router-logs-*/_search" -H 'Content-Type: application/json' -d' +ALL_ENVIRONMENT_HITS=$(curl -s -u "admin:$LOGSDB_ADMIN_PASSWORD" -XGET "http://logs-db:9200/router-logs-*/_search" -H 'Content-Type: 
application/json' -d' { "size": 0, "aggs": { @@ -41,7 +44,7 @@ ALL_ENVIRONMENT_HITS=$(curl -s -XGET "http://logs-db:9200/router-logs-*/_search" "filter": { "range": { "@timestamp": { - "gte": "now-1h" + "gte": "now-4h" } } } @@ -49,6 +52,9 @@ ALL_ENVIRONMENT_HITS=$(curl -s -XGET "http://logs-db:9200/router-logs-*/_search" } }' | jq '.aggregations.group_by_openshift_project.buckets') +# All data successfully loaded, now we don't want to fail anymore if a single idleing fails +set +eo pipefail + # Filter only projects that actually have an environment # Loop through each found project echo "$DEVELOPMENT_ENVIRONMENTS" | jq -c '.data.developmentEnvironments[] | select((.environments|length)>=1)' | while read project @@ -78,11 +84,11 @@ echo "$DEVELOPMENT_ENVIRONMENTS" | jq -c '.data.developmentEnvironments[] | sele continue elif [ "$HAS_HITS" == "true" ]; then HITS=$(echo $ALL_ENVIRONMENT_HITS | jq ".[] | select(.key==\"$ENVIRONMENT_OPENSHIFT_PROJECTNAME\") | .doc_count") - echo "$OPENSHIFT_URL - $PROJECT_NAME: $ENVIRONMENT_NAME had $HITS hits in last hour, no idleing" + echo "$OPENSHIFT_URL - $PROJECT_NAME: $ENVIRONMENT_NAME had $HITS hits in last four hours, no idleing" else - echo "$OPENSHIFT_URL - $PROJECT_NAME: $ENVIRONMENT_NAME had no hits in last hour, starting to idle" + echo "$OPENSHIFT_URL - $PROJECT_NAME: $ENVIRONMENT_NAME had no hits in last four hours, starting to idle" # actually idleing happens here - oc --insecure-skip-tls-verify --token="$OPENSHIFT_TOKEN" --server="$OPENSHIFT_URL" -n "$ENVIRONMENT_OPENSHIFT_PROJECTNAME" idle --all + oc --insecure-skip-tls-verify --token="$OPENSHIFT_TOKEN" --server="$OPENSHIFT_URL" -n "$ENVIRONMENT_OPENSHIFT_PROJECTNAME" idle -l "service notin (mariadb,postgres)" ### Faster Unidling: ## Instead of depending that each endpoint is unidling their own service (which means it takes a lot of time to unidle multiple services) diff --git a/openshift-setup/docker-host.yaml b/services/docker-host/docker-host.yaml similarity index 
52% rename from openshift-setup/docker-host.yaml rename to services/docker-host/docker-host.yaml index a16a5c5371..bd251fac54 100644 --- a/openshift-setup/docker-host.yaml +++ b/services/docker-host/docker-host.yaml @@ -4,34 +4,68 @@ metadata: creationTimestamp: null name: lagoon-docker-host parameters: - - name: IMAGE - description: Image that should be used - value: amazeeiolagoon/master-docker-host - name: REPOSITORY_TO_UPDATE description: Repository that should be updated by the cronjob value: amazeeio + - name: SERVICE_NAME + description: Name of this service + value: docker-host + - name: SAFE_BRANCH + description: Which branch this belongs to, special chars replaced with dashes + value: master + - name: SAFE_PROJECT + description: Which project this belongs to, special chars replaced with dashes + value: lagoon + - name: BRANCH + description: Which branch this belongs to, original value + value: master + - name: PROJECT + description: Which project this belongs to, original value + value: lagoon + - name: LAGOON_GIT_SHA + description: git hash sha of the current deployment + value: '0000000000000000000000000000000000000000' + - name: SERVICE_ROUTER_URL + description: URL of the Router for this service + value: "" + - name: OPENSHIFT_PROJECT + description: Name of the Project that this service is in + value: lagoon + - name: REGISTRY + description: Registry where Images are pushed to + value: "" + - name: DEPLOYMENT_STRATEGY + description: Strategy of Deploymentconfig + value: "Recreate" + - name: SERVICE_IMAGE + description: Pullable image of service + value: amazeeiolagoon/master-docker-host objects: - apiVersion: v1 kind: DeploymentConfig metadata: labels: - app: docker-host - name: docker-host + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} spec: replicas: 1 selector: - app: docker-host - deploymentconfig: docker-host + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} 
strategy: - type: Recreate + type: ${DEPLOYMENT_STRATEGY} template: metadata: labels: - app: docker-host - deploymentconfig: docker-host + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} spec: containers: - - image: ${IMAGE} + - image: ${SERVICE_IMAGE} imagePullPolicy: Always livenessProbe: failureThreshold: 3 @@ -40,7 +74,7 @@ objects: tcpSocket: port: 2375 timeoutSeconds: 1 - name: docker-host + name: ${SERVICE_NAME} env: - name: DOCKER_HOST value: localhost @@ -60,7 +94,6 @@ objects: tcpSocket: port: 2375 timeoutSeconds: 1 - resources: {} securityContext: privileged: true volumeMounts: @@ -80,15 +113,14 @@ objects: metadata: creationTimestamp: null labels: - app: docker-host - name: docker-host + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} spec: ports: - port: 2375 protocol: TCP targetPort: 2375 selector: - app: docker-host - deploymentconfig: docker-host - status: - loadBalancer: {} \ No newline at end of file + service: ${SERVICE_NAME} \ No newline at end of file diff --git a/services/drush-alias/web/aliases.drushrc.php.stub b/services/drush-alias/web/aliases.drushrc.php.stub index 935b45a0a6..902f964940 100644 --- a/services/drush-alias/web/aliases.drushrc.php.stub +++ b/services/drush-alias/web/aliases.drushrc.php.stub @@ -265,7 +265,7 @@ $defaults = [ ]; $aliases = array_reduce($environments, function ($carry, $environment) use ($defaults, $ssh_host, $ssh_port, $drupal_path) { - $site_name = $environment->name; + $site_name = str_replace('/','-',$environment->name); $site_host = 'localhost'; $alias = []; diff --git a/services/logs-collector/.lagoon.yml b/services/logs-collector/.lagoon.yml index a17b69ae07..45b3b27519 100644 --- a/services/logs-collector/.lagoon.yml +++ b/services/logs-collector/.lagoon.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Template metadata: creationTimestamp: null - name: lagoon-openshift-template-fluentd + name: 
lagoon-remote-openshift-template-logs-collector parameters: - name: SERVICE_NAME description: Name of this service @@ -37,105 +37,246 @@ parameters: - name: SERVICE_IMAGE description: Pullable image of service required: true - - name: CRONJOBS - description: Oneliner of Cronjobs - value: "" objects: -- apiVersion: v1 - kind: DeploymentConfig +- apiVersion: extensions/v1beta1 + kind: DaemonSet metadata: - creationTimestamp: null labels: + service: ${SERVICE_NAME} branch: ${SAFE_BRANCH} project: ${SAFE_PROJECT} name: ${SERVICE_NAME} spec: - replicas: 1 selector: - service: ${SERVICE_NAME} - strategy: - resources: {} + matchLabels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} template: metadata: - creationTimestamp: null labels: service: ${SERVICE_NAME} branch: ${SAFE_BRANCH} project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} spec: containers: - - image: ${SERVICE_IMAGE} - name: ${SERVICE_NAME} - ports: - - containerPort: 24284 - protocol: TCP - readinessProbe: - tcpSocket: - port: 24284 - initialDelaySeconds: 20 - livenessProbe: - tcpSocket: - port: 24284 - initialDelaySeconds: 120 + - env: + - name: K8S_HOST_URL + value: https://kubernetes.default.svc.cluster.local + - name: ES_HOST + value: none + - name: ES_PORT + value: "9200" + - name: ES_CLIENT_CERT + value: /etc/fluent/keys/cert + - name: ES_CLIENT_KEY + value: /etc/fluent/keys/key + - name: ES_CA + value: /etc/fluent/keys/ca + - name: OPS_HOST + value: none + - name: OPS_PORT + value: "9200" + - name: OPS_CLIENT_CERT + value: /etc/fluent/keys/cert + - name: OPS_CLIENT_KEY + value: /etc/fluent/keys/key + - name: OPS_CA + value: /etc/fluent/keys/ca + - name: ES_COPY + value: "false" + - name: JOURNAL_SOURCE + - name: JOURNAL_READ_FROM_HEAD + - name: BUFFER_QUEUE_LIMIT + value: "32" + - name: BUFFER_SIZE_LIMIT + value: 8m + - name: FLUENTD_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: ${SERVICE_NAME} + divisor: "0" + resource: limits.cpu + - name: 
FLUENTD_MEMORY_LIMIT + valueFrom: + resourceFieldRef: + containerName: ${SERVICE_NAME} + divisor: "0" + resource: limits.memory + - name: FILE_BUFFER_LIMIT + value: 256Mi + - name: LOGS_FORWARDER_SHARED_KEY + value: secret envFrom: - configMapRef: name: lagoon-env + image: registry.access.redhat.com/openshift3/logging-fluentd:v3.6 + name: ${SERVICE_NAME} resources: + limits: + memory: 512Mi requests: - cpu: 10m - memory: 10Mi + cpu: 100m + memory: 512Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /run/log/journal + name: runlogjournal + - mountPath: /var/log + name: varlog + - mountPath: /var/lib/docker/containers + name: varlibdockercontainers + readOnly: true + - mountPath: /etc/fluent/configs.d/user + name: config + readOnly: true + - mountPath: /etc/docker-hostname + name: dockerhostname + readOnly: true + - mountPath: /etc/localtime + name: localtime + readOnly: true + - mountPath: /etc/sysconfig/docker + name: dockercfg + readOnly: true + - mountPath: /etc/docker + name: dockerdaemoncfg + readOnly: true + - mountPath: /var/lib/fluentd + name: filebufferstorage + - mountPath: /fluentd/ssl/ + name: ca-cert + nodeSelector: + lagoon-${SERVICE_NAME}: allowed + restartPolicy: Always + serviceAccount: logs-collector + serviceAccountName: logs-collector volumes: - - name: volume-4750i - secret: - defaultMode: 420 - items: - - key: FORWARD_KEY - path: ca_key.pem - - key: FORWARD_CERTIFICATE - path: ca_cert.pem - secretName: collector + - hostPath: + path: /run/log/journal + name: runlogjournal + - hostPath: + path: /var/log + name: varlog + - hostPath: + path: /var/lib/docker/containers + name: varlibdockercontainers - configMap: defaultMode: 420 + name: ${SERVICE_NAME}-config + name: config + - hostPath: + path: /etc/hostname + name: dockerhostname + - hostPath: + path: /etc/localtime + name: localtime + - hostPath: + path: /etc/sysconfig/docker + name: dockercfg + - hostPath: + path: /etc/docker + name: dockerdaemoncfg + - hostPath: + path: 
/var/lib/fluentd + name: filebufferstorage + - configMap: items: - - key: FLUENT_CONF - path: fluent.conf - name: lagoon-env - name: volume-g7qqi - terminationGracePeriodSeconds: 30 - volumeMounts: - - mountPath: /fluentd/ssl/ - name: volume-4750i - readOnly: true - - mountPath: /fluentd/etc/ - name: volume-g7qqi - test: false - triggers: - - type: ConfigChange - - imageChangeParams: - automatic: true - containerNames: - - ${SERVICE_NAME} - from: - kind: ImageStreamTag - name: ${SERVICE_NAME}:latest - type: ImageChange - status: {} + - key: LOGS_FORWARDER_CA_CERT + path: ca_cert.pem + name: ${SERVICE_NAME}-config + name: ca-cert + templateGeneration: 1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate - apiVersion: v1 - kind: Service + kind: ConfigMap metadata: - creationTimestamp: null - labels: - service: ${SERVICE_NAME} - branch: ${SAFE_BRANCH} - project: ${SAFE_PROJECT} - name: ${SERVICE_NAME} - spec: - ports: - - name: secure-forward - port: 24284 - protocol: TCP - targetPort: 24284 - selector: - service: ${SERVICE_NAME} - status: - loadBalancer: {} + name: ${SERVICE_NAME}-config + data: + fluent.conf: | + # This file is the fluentd configuration entrypoint. Edit with care. + + #@include configs.d/openshift/system.conf + + + log_level info + + + ## sources + ## ordered so that syslog always runs last... 
+ #@include configs.d/dynamic/input-docker-*.conf + + + @type tail + @label @INGRESS + path "/var/log/containers/*.log" + pos_file "/var/log/es-containers.log.pos" + time_format %Y-%m-%dT%H:%M:%S.%N%Z + tag kubernetes.* + format json + keep_time_key true + read_from_head "true" + exclude_path ["/var/log/containers/logs-collector*", "/var/log/containers/logs-forwarder*"] + + + @include configs.d/dynamic/input-syslog-*.conf + @include configs.d/openshift/input-post-*.conf + ## + + + secure-forward.conf: | + + @type secure_forward + self_hostname "#{ENV['HOSTNAME']}" + secure true + shared_key "#{ENV['LOGS_FORWARDER_SHARED_KEY']}" + ca_cert_path "/fluentd/ssl/ca_cert.pem" + + host logs-forwarder + port 24284 + + + flush_interval 1s + + LOGS_FORWARDER_CA_CERT: |- + -----BEGIN CERTIFICATE----- + MIIDIDCCAggCAQEwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCVVMxCzAJBgNV + BAgMAkNBMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRkwFwYDVQQDDBBTZWN1cmVG + b3J3YXJkIENBMB4XDTcwMDEwMTAwMDAwMFoXDTIzMDUzMTE4MTQyOVowTTELMAkG + A1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRkw + FwYDVQQDDBBTZWN1cmVGb3J3YXJkIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A + MIIBCgKCAQEA60S/3/jRlMoNEwnCPsa++7vuQ5wScEPkCikTmisH400LUHG+yl4L + pxp3yYJqSfUougyDE3D9Tm/1toexrZJS0uLotNxdsbwcJfJaAdO5T5auhWkxzwQp + RCALXgOI+JdIIkj6qSuX3HJIqKOnWRzr8H/0tBM0z2sVT86VG8f4VPgMhfKfN3dg + XiZjM70BZfw5lq3QIbyyP7zjdN8h0rgleC3VpyKl9pU3T0aXsSFfREqst9M6kvbY + ls0kciEdAaN/KZxb11gAZpBFe4pDYTR77ux3QSuZFpqWx0y2ZKbD8OYKDows+khB + BQc1hIhmQoGc9Y9q37x7KW9q0RShnitulQIDAQABoxAwDjAMBgNVHRMEBTADAQH/ + MA0GCSqGSIb3DQEBCwUAA4IBAQA+K6qyduJPWAuBa+o0IzSvgHvnoEw6CsiZ7V9G + qKlUNkT7nf8lrPAA5mVC+B3vt/dM91+X3zDTzY/DUHK+tRyNs3S0JvMTym4JuZ/F + mUOpRzna+75//8YwUVto9CnE/0Ae+j+6A0WUzxOQ0SD5Z77h1gQHOw9OKnZoyGJy + T1UbP15BHknWLLXRZkeFK+GD+QZ7vZCWdCkjv29Yz3n5dTFRp5UhguuS6W0FY/8R + E0okMYItqUUI5HdZK+2QsdOOk+RyU+gG0+2sLQV3FS4g7BMz5eUR4UBB3trU9x6b + 3RRMk1JALaQKR59kFLGtQeNpz5LGfJufgfCvRptdNo3S/mQZ + -----END CERTIFICATE----- \ No newline at end of file diff --git 
a/services/logs-collector/deploy.sh b/services/logs-collector/deploy.sh deleted file mode 100755 index e1bf6e2c51..0000000000 --- a/services/logs-collector/deploy.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh - -# There are some additional steps to deploy this service. - -sudo gem install fluent-plugin-secure-forward - -secure-forward-ca-generate "$(pwd)" $FLUENT_PASSPHRASE - -oc create secret generic collector \ - --from-file=FORWARD_CERTIFICATE=ca_cert.pem \ - --from-file=FORWARD_KEY=ca_key.pem \ - --from-literal=FORWARD_PASSPHRASE=${FLUENT_PASSPHRASE} diff --git a/services/logs-collector/fluent.conf b/services/logs-collector/fluent.conf deleted file mode 100644 index 8b7b43e8bd..0000000000 --- a/services/logs-collector/fluent.conf +++ /dev/null @@ -1,9 +0,0 @@ - - @type secure_forward - shared_key secret_string - self_hostname server.fqdn.local # This fqdn is used as CN (Common Name) of certificates - secure true - ca_cert_path /fluentd/ssl/ca_cert.pem - ca_private_key_path /fluentd/ssl/ca_key.pem - ca_private_key_passphrase danieljackson - diff --git a/services/logs-collector/plugins/.gitignore b/services/logs-collector/plugins/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/services/logs-forwarder/.lagoon.yml b/services/logs-forwarder/.lagoon.yml new file mode 100644 index 0000000000..27f32cc476 --- /dev/null +++ b/services/logs-forwarder/.lagoon.yml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: Template +metadata: + creationTimestamp: null + name: lagoon-openshift-template-fluentd +parameters: + - name: SERVICE_NAME + description: Name of this service + required: true + - name: SAFE_BRANCH + description: Which branch this belongs to, special chars replaced with dashes + required: true + - name: SAFE_PROJECT + description: Which project this belongs to, special chars replaced with dashes + required: true + - name: BRANCH + description: Which branch this belongs to, original value + required: true + - name: PROJECT + description: Which 
project this belongs to, original value + required: true + - name: LAGOON_GIT_SHA + description: git hash sha of the current deployment + required: true + - name: SERVICE_ROUTER_URL + description: URL of the Router for this service + value: "" + - name: OPENSHIFT_PROJECT + description: Name of the Project that this service is in + required: true + - name: REGISTRY + description: Registry where Images are pushed to + required: true + - name: DEPLOYMENT_STRATEGY + description: Strategy of Deploymentconfig + value: "Rolling" + - name: SERVICE_IMAGE + description: Pullable image of service + required: true + - name: CRONJOBS + description: Oneliner of Cronjobs + value: "" +objects: +- apiVersion: v1 + kind: DeploymentConfig + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} + spec: + replicas: 1 + selector: + service: ${SERVICE_NAME} + strategy: + type: ${DEPLOYMENT_STRATEGY} + template: + metadata: + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + spec: + containers: + - image: ${SERVICE_IMAGE} + name: ${SERVICE_NAME} + ports: + - containerPort: 24284 + protocol: TCP + readinessProbe: + tcpSocket: + port: 24284 + initialDelaySeconds: 20 + livenessProbe: + tcpSocket: + port: 24284 + initialDelaySeconds: 120 + env: + - name: LOGSDB_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + key: LOGSDB_ADMIN_PASSWORD + name: logs-db-admin-password + envFrom: + - configMapRef: + name: lagoon-env + resources: + requests: + cpu: 10m + memory: 10Mi + volumeMounts: + - mountPath: /fluentd/etc/ + name: config + volumes: + - configMap: + items: + - key: FLUENT_CONF + path: fluent.conf + name: ${SERVICE_NAME}-config + name: config + triggers: + - type: ConfigChange +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: ${SERVICE_NAME} + 
spec: + ports: + - name: secure-forward + port: 24284 + protocol: TCP + targetPort: 24284 + type: NodePort + selector: + service: ${SERVICE_NAME} +- apiVersion: v1 + kind: ConfigMap + metadata: + name: ${SERVICE_NAME}-config + data: + FLUENT_CONF: |- + + log_level info + + + + @type secure_forward + @label @ELASTICSEARCH + self_hostname "#{ENV['HOSTNAME']}" + secure true + port 24284 + shared_key "#{ENV['LOGS_FORWARDER_SHARED_KEY']}" + ca_cert_path /fluentd/ssl/ca_cert.pem + ca_private_key_path /fluentd/ssl/ca_key.pem + ca_private_key_passphrase "#{ENV['LOGS_FORWARDER_PRIVATE_KEY_PASSPHRASE']}" + + + diff --git a/services/logs-collector/Dockerfile b/services/logs-forwarder/Dockerfile similarity index 68% rename from services/logs-collector/Dockerfile rename to services/logs-forwarder/Dockerfile index 9f5e485f5f..d0e9b9daf2 100644 --- a/services/logs-collector/Dockerfile +++ b/services/logs-forwarder/Dockerfile @@ -1,4 +1,4 @@ -FROM fluent/fluentd:v0.12-onbuild +FROM fluent/fluentd:v0.12 USER root @@ -8,17 +8,20 @@ RUN apk add --update --virtual .build-deps \ fluent-plugin-elasticsearch \ fluent-plugin-secure-forward \ fluent-plugin-record-reformer \ - # fluent-plugin-anonymizer \ && gem sources --clear-all \ && apk del .build-deps \ && rm -rf /var/cache/apk/* \ /home/fluent/.gem/ruby/2.3.0/cache/*.gem COPY entrypoint.sh /bin/ +COPY ca_cert.pem ca_key.pem /fluentd/ssl/ RUN find "/fluentd" -exec chgrp 0 {} \; RUN find "/fluentd" -exec chmod g+rw {} \; RUN find "/fluentd" -type d -exec chmod g+x {} + +ENV LOGS_FORWARDER_SHARED_KEY=secret \ + LOGS_FORWARDER_PRIVATE_KEY_PASSPHRASE=amazing1 + ENTRYPOINT ["/bin/entrypoint.sh"] -CMD exec fluentd -c /fluentd/etc/${FLUENTD_CONF} -p /fluentd/plugins $FLUENTD_OPT +CMD ["sh", "-c", "exec fluentd -c /fluentd/etc/${FLUENTD_CONF} -p /fluentd/plugins $FLUENTD_OPT"] diff --git a/services/logs-forwarder/ca_cert.pem b/services/logs-forwarder/ca_cert.pem new file mode 100644 index 0000000000..6ff51b8fb7 --- /dev/null +++ 
b/services/logs-forwarder/ca_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDIDCCAggCAQEwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRkwFwYDVQQDDBBTZWN1cmVG +b3J3YXJkIENBMB4XDTcwMDEwMTAwMDAwMFoXDTIzMDUzMTE4MTQyOVowTTELMAkG +A1UEBhMCVVMxCzAJBgNVBAgMAkNBMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRkw +FwYDVQQDDBBTZWN1cmVGb3J3YXJkIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA60S/3/jRlMoNEwnCPsa++7vuQ5wScEPkCikTmisH400LUHG+yl4L +pxp3yYJqSfUougyDE3D9Tm/1toexrZJS0uLotNxdsbwcJfJaAdO5T5auhWkxzwQp +RCALXgOI+JdIIkj6qSuX3HJIqKOnWRzr8H/0tBM0z2sVT86VG8f4VPgMhfKfN3dg +XiZjM70BZfw5lq3QIbyyP7zjdN8h0rgleC3VpyKl9pU3T0aXsSFfREqst9M6kvbY +ls0kciEdAaN/KZxb11gAZpBFe4pDYTR77ux3QSuZFpqWx0y2ZKbD8OYKDows+khB +BQc1hIhmQoGc9Y9q37x7KW9q0RShnitulQIDAQABoxAwDjAMBgNVHRMEBTADAQH/ +MA0GCSqGSIb3DQEBCwUAA4IBAQA+K6qyduJPWAuBa+o0IzSvgHvnoEw6CsiZ7V9G +qKlUNkT7nf8lrPAA5mVC+B3vt/dM91+X3zDTzY/DUHK+tRyNs3S0JvMTym4JuZ/F +mUOpRzna+75//8YwUVto9CnE/0Ae+j+6A0WUzxOQ0SD5Z77h1gQHOw9OKnZoyGJy +T1UbP15BHknWLLXRZkeFK+GD+QZ7vZCWdCkjv29Yz3n5dTFRp5UhguuS6W0FY/8R +E0okMYItqUUI5HdZK+2QsdOOk+RyU+gG0+2sLQV3FS4g7BMz5eUR4UBB3trU9x6b +3RRMk1JALaQKR59kFLGtQeNpz5LGfJufgfCvRptdNo3S/mQZ +-----END CERTIFICATE----- \ No newline at end of file diff --git a/services/logs-forwarder/ca_key.pem b/services/logs-forwarder/ca_key.pem new file mode 100644 index 0000000000..f9d859b2bf --- /dev/null +++ b/services/logs-forwarder/ca_key.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-256-CBC,521127E3EAEFF3D17444685300173DFF + +rCU605eIzXyhonVbQgylk5lJMsiOv9EdM9FtLepGjuA49rG9DnLLgUivl3U2Lwjl +8RKqAnxX1BwTRooGnR9REjV5aUV35IzDUv4EFldHnOqPMN2dM0rQuLJd04uvVtD0 +yZhc3PbFiLU4rcSsBda9XYtbr/anruvT6+WVzLVh3rTdF/n1qERvoUI0RFAAzs6x +5B4OlCW2bFsENE6TJQw8IMrh+ERVhs0tWbeHufdsP2a71RrdIda9u6icVQNuEtE4 +yqjT5ww7K3luLgG7OGC9IpJSypVRWRnjPHq9aA0+K7ZP/WIOW+ZnwA1q/KlFHqX8 +uOCn46vIWvgsDFNeFHmVJ2Fr3ty5W8LPZw77FQuIkOvw8Fj3iUl+Z4PfAHWIXxLM 
+UB8rchuDqEWCgK2vuQzCZDIEYc+3GRn3XcL29Ic69DbpHa/dn9vybEzY8ANMadh5 +b5fb1IzJJIaJfUjRpSZ+53JRZo6uK0FM6XEIpkBv3KcsiXbJj2xDMw0kpkDAyKUn +8P6MgxPAZpOn7mX9fCOKFoz6qudWm0Gg+1DLxUN3XUqWarYkBHWA9FoKaWRgTcCj +UHKMlaXcUHLt8Bd43f91B9+H3takywlIAlgQA7tEimcfiWKDOpdsdkoXKADMaKqI +tpBtrSO5hxO4GzVU8OmubGigGLQgPrFkAddqrAq5IjaW38R8i4xYBPOEsN4vbSEZ +KGpyLIgkA1RaUYpGgTHecCa2V4zgn/qRsxjzHBt8y1cpZPSjSrn7Mm9MC53fhUvZ +PzRzySnc42jBFUaNMSKKUI2XIYYd3qrff1xlCNEuFRyw0RbJ0RVnjmCtWbRz/3Ag +jVhbx50YXu8KdpMVUaB1Hsq7NiMyRJSpL29cxmakFTmIgb3sE3TzWqxNnmbgbO7B +j3ftKJ/RT/fUKIHgEiRTcOTVGeZuB1JCX6CcPGlgaq7WzLtaSmUxPwRjSH52VfZb +V1z9MuqSbSjaLA55Wwt9U98YWErwXv5LSgTTT/FD4rAREW0qgRLdN9ruJDTNIWpQ +4u4CDcrkR5ojzniwFwxuiVSfU0EbHflFTaXe3YPPNsaysWFbycknpzC4kJPEZJxS +96ATV6FVYnz2sEBNncAjJFjqPLieKKJS69cUZUxO2icSkgNk6hzTCxDQKZsTpMiP +/P7gTWf6eoREphAkyhATsRhmbjfcoWA+EndszvXvdYE8svg/5jPQV4cRCtxGFW+o +tqxVEvVmh0VrgN2aClWJ0ldh4RITYBsnsKrPB3R4K9m8og/DVyLHVXL8dnVcx7tP +YOuHhgyFGH/dQXAjSCBwmTmExr7A3hHBxDrdDoxjc5i76RC0yfwoMJcWQW8HYx5c +CIAD4+biKC3HdWbyu4myAmqpg0V1yyIjMEpw2RO4gkMBk1ZHxAz/5ZqjN2U0BjpC +9WoRdFN5AAxm8KrCGDLOee1+jkdUjf4JYNOMQr7m/8X7faEK6Dh8J8NJlSIBeMb6 +qalWCu1g8kWGZdOvMyPtu2ZCOBTEvwVQL5acIG8i3Bw7l0epA6OIF43zfQ4nfWW0 +nBcAjjexVvEXVwHiJBhinlwP9lpGBZLpo5/2ClFCJqgSTPG+cKfFQAlhUosp03i0 +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/services/logs-collector/entrypoint.sh b/services/logs-forwarder/entrypoint.sh similarity index 100% rename from services/logs-collector/entrypoint.sh rename to services/logs-forwarder/entrypoint.sh diff --git a/services/logs2logs-db/.lagoon.yml b/services/logs2logs-db/.lagoon.yml index 60119efb9d..356169b010 100644 --- a/services/logs2logs-db/.lagoon.yml +++ b/services/logs2logs-db/.lagoon.yml @@ -159,11 +159,27 @@ objects: port: 28777 protocol: TCP targetPort: 28777 - - name: http-input-module - port: 8080 + - name: beats-input-module + port: 5044 protocol: TCP - targetPort: 8080 + targetPort: 5044 + type: NodePort selector: service: ${SERVICE_NAME} - status: - 
loadBalancer: {} \ No newline at end of file +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + service: ${SERVICE_NAME} + branch: ${SAFE_BRANCH} + project: ${SAFE_PROJECT} + name: router-logs + spec: + ports: + - name: udp-input-module + port: 5140 + protocol: UDP + targetPort: 5140 + selector: + service: ${SERVICE_NAME} \ No newline at end of file diff --git a/services/logs2logs-db/Dockerfile b/services/logs2logs-db/Dockerfile index 47f85bfe72..dc4fed4eb1 100644 --- a/services/logs2logs-db/Dockerfile +++ b/services/logs2logs-db/Dockerfile @@ -8,6 +8,7 @@ RUN rm -f pipeline/logstash.conf COPY pipeline/ pipeline/ COPY templates/ templates/ +COPY certs/ certs/ COPY pipelines.yml config/pipelines.yml # https://github.com/elastic/logstash-docker/issues/64 diff --git a/services/logs2logs-db/certs/lumberjack.cert b/services/logs2logs-db/certs/lumberjack.cert new file mode 100644 index 0000000000..36187e0e2c --- /dev/null +++ b/services/logs2logs-db/certs/lumberjack.cert @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICpDCCAYwCCQDrhUaboMuRdjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAls +b2NhbGhvc3QwHhcNMTgwNjI3MjMzMTQ3WhcNMjgwNjI0MjMzMTQ3WjAUMRIwEAYD +VQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDD +xQB0cmt3pstQVWkbpyd8AEtsRtt+f4GLMwVdsG37hRXw+xknMGQWKsIKtxpwShfY +hC6YCaS3ZEkkqctyHZgVJDORe9XmSp+IuFP04Ak8qF/ZfHAaseEysaTRHXJP4YeB +jy7q3ehUGy4DGJimuzkFxc1P02Nk4p0I6lx3+WRi+DwK6jtTOAPEMqQHJZqlQj07 +ZnCfY+Cw0xGy+g8JM+N+l2WRD4Dlhqtm7LdRhlKBG2okSec7s5FojjSkBTAS6wfs +tmhBuhvpS72RWIuUHAExwDjCs4/llRGGWCCUqyn6z6stFD6aF7YNsMy3Gy8UtJ0m +iB6zSxWX6flYKevT+rPhAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACzjllluordk +u0+RJklTJzkJRXTstHnp3R7rNVv8GOqO9eTM0N0TeXHjp+LqMYHoA9ehvz+Pk1Z7 +7JlLyK4/OT7aysNVg/QgZCYOHqj6nGWmwKWjRz9r56DV/0vtdiiwKB7GqvhD7ZLn +W3qseIAzvKlzfwuocLKtBYVLD2llKv3iHiB6C2lRLnzRkYvZP2OgVUSoSNkagLiE +h4tTU1eAulpohjzgUFgv4nDvt6Sp+pa4IjY6Av2MssHoL/UN7X00Spgl6pcBVPc3 +JdoikJA2OWI/JgNtFvFsajHOI4+blcauN2C6E8VGWyCKSODSW8zUgq+TQoNJod20 +79ImYvZ2k1M= 
+-----END CERTIFICATE----- diff --git a/services/logs2logs-db/certs/lumberjack.key b/services/logs2logs-db/certs/lumberjack.key new file mode 100644 index 0000000000..fcd388f19d --- /dev/null +++ b/services/logs2logs-db/certs/lumberjack.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDxQB0cmt3pstQ +VWkbpyd8AEtsRtt+f4GLMwVdsG37hRXw+xknMGQWKsIKtxpwShfYhC6YCaS3ZEkk +qctyHZgVJDORe9XmSp+IuFP04Ak8qF/ZfHAaseEysaTRHXJP4YeBjy7q3ehUGy4D +GJimuzkFxc1P02Nk4p0I6lx3+WRi+DwK6jtTOAPEMqQHJZqlQj07ZnCfY+Cw0xGy ++g8JM+N+l2WRD4Dlhqtm7LdRhlKBG2okSec7s5FojjSkBTAS6wfstmhBuhvpS72R +WIuUHAExwDjCs4/llRGGWCCUqyn6z6stFD6aF7YNsMy3Gy8UtJ0miB6zSxWX6flY +KevT+rPhAgMBAAECggEAJihx7ECZHdtO6bucHx23nn3YNaSiB8gGkgFB3H8VEN9h +5XSr9G2ENKXk6H3EE3r9puAszMmkZ3KOpyzJUNbf288QlhzTFm4ZJmb3RlWct0wC +Urvkhe+B+4WQoInizBsjdGdaN8FFFY+uqj/TETyQ4sUpPZPwXaul/CTiHShpHrj4 +eQWfk1sFydpIee39nkPKdvD0anSteQ/LsF2twi3iJW1q7Ao1BfevvooXEiLW1hSv +cOGVF5R9OAkhxdmmvzOgz8nHjOKTnHXwnP/bKhB60+YZs9/dvxkI48vwzAIbW7Mv +jBMYbMYTwlUYxhXseyCW9JrZRPO8VsHMWdH3g59QGQKBgQDnion8C7bUdoeXyxm0 +2U9NlFLdrWwzVeDOFxI0TarShdl1Q3l3XDtENe9B79AZaywC10gLdD8zCg/og5e7 +hgFN+w9muXmedqy7jz71g1hlBV9d/5cBONyY5PLwQ6hE5gU0LdQv5iC4Zh3nZ9xp +2Yl7HJojrqVsRyi1oFWWk1hS7wKBgQDYcxtqWONSPKHSjZpQ45vdXGBXwbpGHFqU +uwWDHhAN0ava3tGJbr8Vdfk3u2AwVFn9VZBtu3qGcKSovDIK6zeQbmn5qbekYtaK +BpRkcj/N8mbqMaosLOKjGZ90iTo4S0prd+woMognsRiVze225qfI1W1IGUTWwgWz +jYmDrMwmLwKBgGI4ccpayccdNO3eeVwvZyW4yuJAxAqO3ptuTAml/NXrc9D7u5Y3 +qOiJnKIy8esmfHgEDQrW2Ex1c0QmNoKVGQ6UdvcTrRTcsO+rl4CcpC3OdqMwXkVl +cqblUG09ti7sCL8WxdE/5Rd0MjFM7MJ2RozUGEsUthXVpd5OUqBahvg/AoGAWjoJ +aJhrEiHSNwnuFN/8Xhce1hYchxNUswEO+Cw09v27DL6rTAXNQ63s2E2UeDEBXjgn +v4ZSOe6ExQ3GMoUAoYVVScx0j+LfmaPKgeCaNkhgnCldzZJcWy7YKGT3zvWbrRbN +a6g814bcP74a5I5bzL0jE9XsuzUf2Uwb0bbbVuMCgYBL7fzgiOZj4qRHPPp1ZUkj +VoXzmXrhy4PupUdTj3e+wIXh38ad9gWwAmsgy0ASgg6TjB8QF6DtSDe00F9KxkVh +C5usXV21S0oIaaGRz6+NauzSfB7MKCnQVB2NCWbrtardhe/ToTbXFLUyYV0ZZaFg +Dh/sna27+AeJ1SHHEhSeXA== +-----END PRIVATE KEY----- diff 
--git a/services/logs2logs-db/pipeline/router-logs.conf b/services/logs2logs-db/pipeline/router-logs.conf index 5b597120f6..d84b1f5f7a 100644 --- a/services/logs2logs-db/pipeline/router-logs.conf +++ b/services/logs2logs-db/pipeline/router-logs.conf @@ -5,10 +5,11 @@ input { queue_size => 5000 receive_buffer_bytes => 26214400 } - http { - id => "http_input" - user => "${LOGSTASH_USERNAME}" - password => "${LOGSTASH_PASSWORD}" + beats { + port => 5044 + ssl => true + ssl_certificate => "certs/lumberjack.cert" + ssl_key => "certs/lumberjack.key" } } @@ -33,6 +34,7 @@ filter { } output { + stdout { codec => rubydebug } elasticsearch { user => admin password => "${LOGSDB_ADMIN_PASSWORD}" diff --git a/services/openshiftbuilddeploy/src/index.js b/services/openshiftbuilddeploy/src/index.js index f9e1774193..0b6dcf308c 100644 --- a/services/openshiftbuilddeploy/src/index.js +++ b/services/openshiftbuilddeploy/src/index.js @@ -44,10 +44,11 @@ const messageConsumer = async msg => { var projectId = projectOpenShift.id var openshiftConsole = projectOpenShift.openshift.console_url.replace(/\/$/, ""); var openshiftToken = projectOpenShift.openshift.token || "" - var openshiftProject = `${safeProjectName}-${safeBranchName}` + var openshiftProject = projectOpenShift.openshift.project_pattern ? projectOpenShift.openshift.project_pattern.replace('${branch}',safeBranchName).replace('${project}', safeProjectName) : `${safeProjectName}-${safeBranchName}` var openshiftProjectUser = projectOpenShift.openshift.project_user || "" var deployPrivateKey = projectOpenShift.customer.private_key var gitUrl = projectOpenShift.git_url + var subfolder = projectOpenShift.subfolder || "" var routerPattern = projectOpenShift.openshift.router_pattern ? 
projectOpenShift.openshift.router_pattern.replace('${branch}',safeBranchName).replace('${project}', safeProjectName) : "" var prHeadBranchName = headBranchName || "" var prHeadSha = headSha || "" @@ -145,6 +146,10 @@ const messageConsumer = async msg => { "name": "GIT_REF", "value": gitRef }, + { + "name": "SUBFOLDER", + "value": subfolder + }, { "name": "SAFE_BRANCH", "value": safeBranchName diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/.dockerignore b/tests/files/features-subfolder/subfolder1/subfolder2/.dockerignore new file mode 100644 index 0000000000..b512c09d47 --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/.dockerignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/.lagoon.yml b/tests/files/features-subfolder/subfolder1/subfolder2/.lagoon.yml new file mode 100644 index 0000000000..c76c3c466a --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/.lagoon.yml @@ -0,0 +1 @@ +docker-compose-yaml: docker-compose.yml diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/Dockerfile b/tests/files/features-subfolder/subfolder1/subfolder2/Dockerfile new file mode 100644 index 0000000000..51474cf2dc --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/Dockerfile @@ -0,0 +1,18 @@ +ARG IMAGE_REPO +FROM ${IMAGE_REPO:-amazeeio}/node:8-builder as builder +COPY package.json yarn.lock /app/ +RUN yarn install + +FROM ${IMAGE_REPO:-amazeeio}/node:8 +COPY --from=builder /app/node_modules /app/node_modules +COPY . 
/app/ + +ARG LAGOON_GIT_SHA=0000000000000000000000000000000000000000 +ENV LAGOON_GIT_SHA_BUILDTIME ${LAGOON_GIT_SHA} + +ARG LAGOON_GIT_BRANCH=undefined +ENV LAGOON_GIT_BRANCH_BUILDTIME ${LAGOON_GIT_BRANCH} + +EXPOSE 3000 + +CMD ["yarn", "run", "start"] diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/docker-compose.yml b/tests/files/features-subfolder/subfolder1/subfolder2/docker-compose.yml new file mode 100644 index 0000000000..65ac501f67 --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/docker-compose.yml @@ -0,0 +1,23 @@ +version: '2' +services: + node: + networks: + - amazeeio-network + - default + build: + context: . + dockerfile: Dockerfile + labels: + lagoon.type: node-persistent + lagoon.persistent: /files + volumes: + - ./index.js:/app/index.js:delegated + expose: + - "3000" + environment: + - AMAZEEIO_URL=node.docker.amazee.io + - AMAZEEIO=AMAZEEIO + - AMAZEEIO_HTTP_PORT=3000 +networks: + amazeeio-network: + external: true \ No newline at end of file diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/index.js b/tests/files/features-subfolder/subfolder1/subfolder2/index.js new file mode 100644 index 0000000000..dcff72e782 --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/index.js @@ -0,0 +1,24 @@ +const express = require('express') +const fs = require('fs'); +const app = express() + +app.get('/', async function (req, res) { + let result = [] + Object.keys(process.env).map(key => { + result.push(`${key}=${process.env[key]}`) + }) + result.sort() + + try { + result.push(fs.readFileSync('/files/cron.txt').toString()); + } catch (e) { + // intentionally left empty + } + + res.send(result.join("
")) + +}) + +app.listen(3000, function () { + console.log('Example app listening on port 3000!') +}) diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/package.json b/tests/files/features-subfolder/subfolder1/subfolder2/package.json new file mode 100644 index 0000000000..b577e5e022 --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/package.json @@ -0,0 +1,12 @@ +{ + "name": "node", + "version": "1.0.0", + "main": "index.js", + "license": "MIT", + "dependencies": { + "express": "^4.15.3" + }, + "scripts": { + "start": "node index.js" + } +} diff --git a/tests/files/features-subfolder/subfolder1/subfolder2/yarn.lock b/tests/files/features-subfolder/subfolder1/subfolder2/yarn.lock new file mode 100644 index 0000000000..d9c4cd4621 --- /dev/null +++ b/tests/files/features-subfolder/subfolder1/subfolder2/yarn.lock @@ -0,0 +1,247 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +accepts@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca" + dependencies: + mime-types "~2.1.11" + negotiator "0.6.1" + +array-flatten@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" + +content-disposition@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4" + +content-type@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed" + +cookie-signature@1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" + +cookie@0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb" 
+ +debug@2.6.7: + version "2.6.7" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.7.tgz#92bad1f6d05bbb6bba22cca88bcd0ec894c2861e" + dependencies: + ms "2.0.0" + +depd@1.1.0, depd@~1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3" + +destroy@~1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" + +ee-first@1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + +encodeurl@~1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20" + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + +etag@~1.8.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051" + +express@^4.15.3: + version "4.15.3" + resolved "https://registry.yarnpkg.com/express/-/express-4.15.3.tgz#bab65d0f03aa80c358408972fc700f916944b662" + dependencies: + accepts "~1.3.3" + array-flatten "1.1.1" + content-disposition "0.5.2" + content-type "~1.0.2" + cookie "0.3.1" + cookie-signature "1.0.6" + debug "2.6.7" + depd "~1.1.0" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.8.0" + finalhandler "~1.0.3" + fresh "0.5.0" + merge-descriptors "1.0.1" + methods "~1.1.2" + on-finished "~2.3.0" + parseurl "~1.3.1" + path-to-regexp "0.1.7" + proxy-addr "~1.1.4" + qs "6.4.0" + range-parser "~1.2.0" + send "0.15.3" + serve-static "1.12.3" + setprototypeof "1.0.3" + statuses "~1.3.1" + type-is "~1.6.15" + utils-merge "1.0.0" + vary "~1.1.1" + +finalhandler@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.3.tgz#ef47e77950e999780e86022a560e3217e0d0cc89" + 
dependencies: + debug "2.6.7" + encodeurl "~1.0.1" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.1" + statuses "~1.3.1" + unpipe "~1.0.0" + +forwarded@~0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363" + +fresh@0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e" + +http-errors@~1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.1.tgz#5f8b8ed98aca545656bf572997387f904a722257" + dependencies: + depd "1.1.0" + inherits "2.0.3" + setprototypeof "1.0.3" + statuses ">= 1.3.1 < 2" + +inherits@2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" + +ipaddr.js@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.3.0.tgz#1e03a52fdad83a8bbb2b25cbf4998b4cffcd3dec" + +media-typer@0.3.0: + version "0.3.0" + resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" + +merge-descriptors@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" + +methods@~1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" + +mime-db@~1.27.0: + version "1.27.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.27.0.tgz#820f572296bbd20ec25ed55e5b5de869e5436eb1" + +mime-types@~2.1.11, mime-types@~2.1.15: + version "2.1.15" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.15.tgz#a4ebf5064094569237b8cf70046776d09fc92aed" + dependencies: + mime-db "~1.27.0" + +mime@1.3.4: + version "1.3.4" + resolved 
"https://registry.yarnpkg.com/mime/-/mime-1.3.4.tgz#115f9e3b6b3daf2959983cb38f149a2d40eb5d53" + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + +negotiator@0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.1.tgz#2b327184e8992101177b28563fb5e7102acd0ca9" + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + dependencies: + ee-first "1.1.1" + +parseurl@~1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.1.tgz#c8ab8c9223ba34888aa64a297b28853bec18da56" + +path-to-regexp@0.1.7: + version "0.1.7" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" + +proxy-addr@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-1.1.4.tgz#27e545f6960a44a627d9b44467e35c1b6b4ce2f3" + dependencies: + forwarded "~0.1.0" + ipaddr.js "1.3.0" + +qs@6.4.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.4.0.tgz#13e26d28ad6b0ffaa91312cd3bf708ed351e7233" + +range-parser@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.0.tgz#f49be6b487894ddc40dcc94a322f611092e00d5e" + +send@0.15.3: + version "0.15.3" + resolved "https://registry.yarnpkg.com/send/-/send-0.15.3.tgz#5013f9f99023df50d1bd9892c19e3defd1d53309" + dependencies: + debug "2.6.7" + depd "~1.1.0" + destroy "~1.0.4" + encodeurl "~1.0.1" + escape-html "~1.0.3" + etag "~1.8.0" + fresh "0.5.0" + http-errors "~1.6.1" + mime "1.3.4" + ms "2.0.0" + on-finished "~2.3.0" + range-parser "~1.2.0" + statuses "~1.3.1" + +serve-static@1.12.3: + version "1.12.3" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.12.3.tgz#9f4ba19e2f3030c547f8af99107838ec38d5b1e2" + dependencies: + encodeurl 
"~1.0.1" + escape-html "~1.0.3" + parseurl "~1.3.1" + send "0.15.3" + +setprototypeof@1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.0.3.tgz#66567e37043eeb4f04d91bd658c0cbefb55b8e04" + +"statuses@>= 1.3.1 < 2", statuses@~1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.3.1.tgz#faf51b9eb74aaef3b3acf4ad5f61abf24cb7b93e" + +type-is@~1.6.15: + version "1.6.15" + resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.15.tgz#cab10fb4909e441c82842eafe1ad646c81804410" + dependencies: + media-typer "0.3.0" + mime-types "~2.1.15" + +unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + +utils-merge@1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.0.tgz#0294fb922bb9375153541c4f7096231f287c8af8" + +vary@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.1.tgz#67535ebb694c1d52257457984665323f587e8d37" diff --git a/tests/tests/features.yaml b/tests/tests/features.yaml index 79e15c7e3c..da4c3131e7 100644 --- a/tests/tests/features.yaml +++ b/tests/tests/features.yaml @@ -1,26 +1,26 @@ --- -# - include: features/api-token.yaml -# vars: -# testname: "API TOKEN" +- include: features/api-token.yaml + vars: + testname: "API TOKEN" -# - include: features/remote-shell.yaml -# vars: -# testname: "REMOTE SHELL" -# node_version: 8 -# git_repo_name: features.git -# project: ci-features -# branch: remoteshell +- include: features/remote-shell.yaml + vars: + testname: "REMOTE SHELL" + node_version: 8 + git_repo_name: features.git + project: ci-features + branch: remoteshell -# - include: features/promote.yaml -# vars: -# testname: "PROMOTE" -# git_repo_name: features.git -# project: ci-features -# source_environment: source -# promote_environment: target -# check_url_source: "http://node.{{ project | regex_replace('_', '-') }}.{{ 
source_environment | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# check_url_promote: "http://node.{{ project | regex_replace('_', '-') }}.{{ promote_environment | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/promote.yaml + vars: + testname: "PROMOTE" + git_repo_name: features.git + project: ci-features + source_environment: source + promote_environment: target + check_url_source: "http://node.{{ project | regex_replace('_', '-') }}.{{ source_environment | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" + check_url_promote: "http://node.{{ project | regex_replace('_', '-') }}.{{ promote_environment | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" - include: features/cronjobs.yaml vars: @@ -30,64 +30,72 @@ branch: branch/cronjob check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/multiproject.yaml -# vars: -# testname: "MULTIPROJECT - two projects with same git url" -# git_repo_name: multiproject.git -# project1: ci-multiproject1 -# project2: ci-multiproject2 -# branch: multiproject -# check_url1: "http://node.{{ project1 | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# check_url2: "http://node.{{ project2 | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/multiproject.yaml + vars: + testname: "MULTIPROJECT - two projects with same git url" + git_repo_name: multiproject.git + project1: ci-multiproject1 + project2: ci-multiproject2 + branch: multiproject + check_url1: "http://node.{{ project1 | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" + check_url2: "http://node.{{ project2 | regex_replace('_', '-') 
}}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/production-environment-protection.yaml -# vars: -# testname: "PROTECTED PRODUCTION ENVIRONMENT" -# git_repo_name: features.git -# project: ci-features -# branch: master -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/production-environment-protection.yaml + vars: + testname: "PROTECTED PRODUCTION ENVIRONMENT" + git_repo_name: features.git + project: ci-features + branch: master + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" + +- include: features/environment-type.yaml + vars: + testname: "ENVIRONMENT TYPE DEVELOPMENT" + git_repo_name: features.git + project: ci-features + branch: develop + environment_type: development + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/environment-type.yaml -# vars: -# testname: "ENVIRONMENT TYPE DEVELOPMENT" -# git_repo_name: features.git -# project: ci-features -# branch: develop -# environment_type: development -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/environment-type.yaml + vars: + testname: "ENVIRONMENT TYPE PRODUCTION" + git_repo_name: features.git + project: ci-features + branch: master + environment_type: production + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/environment-type.yaml -# vars: -# testname: "ENVIRONMENT TYPE PRODUCTION" -# git_repo_name: features.git -# project: ci-features -# branch: master -# 
environment_type: production -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/route-env-variables.yaml + vars: + testname: "ROUTE ENV VARIABLES" + git_repo_name: features.git + project: ci-features + branch: branch/routes + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/route-env-variables.yaml -# vars: -# testname: "ROUTE ENV VARIABLES" -# git_repo_name: features.git -# project: ci-features -# branch: branch/routes -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/dot-env.yaml + vars: + testname: "DOT-ENV VARIABLES" + git_repo_name: features.git + project: ci-features + branch: slash/branch1 + expected_dot_env: slash/branch1 + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/dot-env.yaml -# vars: -# testname: "DOT-ENV VARIABLES" -# git_repo_name: features.git -# project: ci-features -# branch: slash/branch1 -# expected_dot_env: slash/branch1 -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" +- include: features/dot-env.yaml + vars: + testname: "DOT-ENV VARIABLES" + git_repo_name: features.git + project: ci-features + branch: branch1 + expected_dot_env: branch1 + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" -# - include: features/dot-env.yaml -# vars: -# testname: "DOT-ENV VARIABLES" -# git_repo_name: features.git -# project: ci-features -# branch: branch1 -# expected_dot_env: 
branch1 -# check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" \ No newline at end of file +- include: features/subfolder.yaml + vars: + testname: "SUBFOLDER" + git_repo_name: features-subfolder.git + project: ci-features-subfolder + branch: branch1 + check_url: "http://node.{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" \ No newline at end of file diff --git a/tests/tests/features/subfolder.yaml b/tests/tests/features/subfolder.yaml new file mode 100644 index 0000000000..0dd18e7ae5 --- /dev/null +++ b/tests/tests/features/subfolder.yaml @@ -0,0 +1,43 @@ + +- name: "{{ testname }} - init git, add files, commit, git push" + hosts: localhost + serial: 1 + vars: + git_files: "features-subfolder/" + tasks: + - include: ../../tasks/git-init.yaml + - include: ../../tasks/git-add-commit-push.yaml + +- name: "{{ testname }} - rest2tasks deploy post for just git branch on {{ project }}" + hosts: localhost + serial: 1 + vars: + branch: "{{ branch }}" + project: "{{ project }}" + tasks: + - include: ../../tasks/rest/deploy-no-sha.yaml + +- include: ../../checks/check-branch-sha.yaml + vars: + expected_head: "{{ current_head }}" + expected_branch: "{{ branch }}" + project: "{{ project }}" + url: "{{ check_url }}" + +- name: "{{ testname }} - rest2tasks /remove POST on {{ project }}, which should remove all resources" + hosts: localhost + serial: 1 + vars: + project: "{{ project }}" + branch: "{{ branch }}" + tasks: + - include: ../../tasks/rest/remove.yaml + +- name: "{{ testname }} - check if site for {{ project }} does not exist anymore" + hosts: localhost + serial: 1 + vars: + url: "http://{{ project | regex_replace('_', '-') }}.{{ branch | regex_replace('/', '-') }}.{{ lookup('env','OPENSHIFT_ROUTE_SUFFIX') }}" + expected_returncode: 503 + tasks: + - include: ../../checks/check-url-returncode.yaml