diff --git a/.gitignore b/.gitignore index 8b2d48dfa7..855ffd40c2 100644 --- a/.gitignore +++ b/.gitignore @@ -29,3 +29,7 @@ compile_commands.json *.tfstate *.tfstate.backup *_override.tf + +# Habitat +results + diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..fd18203670 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,128 @@ +# Configurable environment variables: +# HAB_ORIGIN - denotes the docker origin (dockerhub ID) +# VERSION - the version identifier tag on the packages +# AUTOMATE_ENABLED - enable the Automate data collector (true or false) +# AUTOMATE_SERVER - the IP address or hostname of the Automate server +# AUTOMATE_TOKEN - the token for the Automate server data collector +# USER_ID - the user ID to use +# GROUP_ID - the group ID to use + +version: '2.1' +services: + postgresql: + image: chefdemo/postgresql:stable + environment: + HAB_POSTGRESQL: | + [superuser] + name = 'hab' + password = 'chefrocks' + volumes: + - postgresql-data:/hab/svc/postgresql/data + + chef-server-ctl: + image: ${HAB_ORIGIN:-chefserverofficial}/chef-server-ctl:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + environment: + HAB_CHEF_SERVER_CTL: | + [chef_server_api] + ip = '${HOST_IP:-172.17.0.1}' + [secrets.data_collector] + token = "${AUTOMATE_TOKEN:-93a49a4f2482c64126f7b6015e6b0f30284287ee4054ff8807fb63d9cbd1c506}" + + elasticsearch: + image: chefdemo/elasticsearch:stable + command: --peer postgresql + ulimits: + nofile: + soft: 65536 + hard: 65536 + volumes: + - elasticsearch-data:/hab/svc/elasticsearch/data + + oc_id: + image: ${HAB_ORIGIN:-chefserverofficial}/oc_id:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + --bind database:postgresql.default + --bind chef-server-ctl:chef-server-ctl.default + + bookshelf: + image: ${HAB_ORIGIN:-chefserverofficial}/bookshelf:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + --bind database:postgresql.default + --bind chef-server-ctl:chef-server-ctl.default + + oc_bifrost: + image: ${HAB_ORIGIN:-chefserverofficial}/oc_bifrost:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + --bind database:postgresql.default + --bind chef-server-ctl:chef-server-ctl.default + + oc_erchef: + image: ${HAB_ORIGIN:-chefserverofficial}/oc_erchef:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + --bind bookshelf:bookshelf.default + --bind oc_bifrost:oc_bifrost.default + --bind database:postgresql.default + --bind elasticsearch:elasticsearch.default + --bind chef-server-ctl:chef-server-ctl.default + environment: + HAB_OC_ERCHEF: | + [chef_authn] + keygen_cache_workers=2 + keygen_cache_size=10 + keygen_start_size=0 + keygen_timeout=20000 + [data_collector] + enabled = ${AUTOMATE_ENABLED:-false} + server = "${AUTOMATE_SERVER:-localhost}" + port = 443 + + chef-server-nginx: + image: ${HAB_ORIGIN:-chefserverofficial}/chef-server-nginx:${VERSION:-latest} + user: ${USER_ID:-42}:${GROUP_ID:-42} + cap_drop: + - NET_BIND_SERVICE + - SETUID + - SETGID + command: --peer postgresql + --bind oc_erchef:oc_erchef.default + --bind oc_bifrost:oc_bifrost.default + --bind oc_id:oc_id.default + 
--bind bookshelf:bookshelf.default
              --bind elasticsearch:elasticsearch.default
              --bind chef-server-ctl:chef-server-ctl.default
+    volumes:
+      - nginx-data:/hab/svc/chef-server-nginx/data
+    ports:
+      - 80:8080
+      - 443:8443
+
+volumes:
+  postgresql-data:
+  elasticsearch-data:
+  nginx-data:
diff --git a/habitat_pkgs_build.sh b/habitat_pkgs_build.sh
new file mode 100755
index 0000000000..719799705c
--- /dev/null
+++ b/habitat_pkgs_build.sh
@@ -0,0 +1,15 @@
+#!/bin/bash -e
+
+# this script builds all the essential habitat packages for running Chef Server
+# additionally, it exports them as a local docker image
+
+CHEF_SERVER_SRC='/src/src'
+
+for dir in dbdpg oc-id openresty-noroot nginx bookshelf chef-server-ctl oc_bifrost oc_erchef; do
+  cd $CHEF_SERVER_SRC/$dir
+  echo "[STATUS] building $dir"
+  build > /var/log/build-${dir}-$(date +%s).log
+  if [[ $dir =~ dbdpg ]]; then continue; fi
+  echo "[STATUS] exporting $dir pkg to docker daemon"
+  hab pkg export docker -i "chefserverofficial/{{pkg_name}}" $(ls -1t results/*.hart | head -1)
+done
diff --git a/oc-chef-pedant/lib/pedant.rb b/oc-chef-pedant/lib/pedant.rb
index 4d5dbd8f51..921f2ff15f 100644
--- a/oc-chef-pedant/lib/pedant.rb
+++ b/oc-chef-pedant/lib/pedant.rb
@@ -16,6 +16,7 @@
 #Override the connect() method to set appropriate IPV6 host headers
 require 'pedant/core_ext/net_http'
+require 'uri'
 require 'pp' # Debugging
 require 'rspec'
@@ -48,6 +49,13 @@ def self.config
     Encoding.default_external = Encoding::UTF_8
     Encoding.default_internal = Encoding::UTF_8
+    # The URI gets normalized in many places in the chain from pedant to erchef; in particular, redundant port
+    # specifications (e.g. 443 for https) are stripped out. We normalize the URI here to make sure that the
+    # specs we check against conform to that requirement.
+    if Config.has_key?(:chef_server)
+      # chomp strips the trailing slash which, while technically correct, is improperly handled when we construct our specs
+      Config[:chef_server] = URI.parse(Config[:chef_server]).normalize.to_s.chomp('/')
+    end
     Config
   end
diff --git a/src/bookshelf/habitat/config/database-migrations.sh b/src/bookshelf/habitat/config/database-migrations.sh
new file mode 100644
index 0000000000..602bc69034
--- /dev/null
+++ b/src/bookshelf/habitat/config/database-migrations.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+
+# TODO: not sure how to handle this.
Sqitch bombs when it can't find the timezone
+export TZ="UTC"
+
+export PERL5LIB="$(cat "$(hab pkg path "chef-server/dbdpg")/LD_RUN_PATH")"
+export PATH=$PATH:$(hab pkg path "core/postgresql")/bin:$(hab pkg path "core/sqitch")/bin
+
+{{#if bind.database}}
+  {{#eachAlive bind.database.members as |member|}}
+    {{#if @last}}
+HOST="{{member.sys.ip}}"
+PORT="{{member.cfg.port}}"
+USER="{{member.cfg.superuser_name}}"
+PASS="{{member.cfg.superuser_password}}"
+DB="bookshelf"
+    {{/if}}
+  {{/eachAlive}}
+{{else}}
+HOST="{{cfg.postgresql.vip}}"
+PORT="{{cfg.postgresql.port}}"
+USER="{{cfg.sql_user}}"
+PASS="{{cfg.sql_password}}"
+DB="bookshelf"
+{{/if}}
+
+PG_ARGS="--host $HOST --port $PORT --username $USER"
+export PGPASSWORD="$PASS"
+
+# Wait until postgres is ready
+until pg_isready $PG_ARGS --quiet; do :; done
+
+# Create the bookshelf database for sqitch to deploy to
+createdb $PG_ARGS $DB "bookshelf"
+
+# Install uuid-ossp extension
+psql $PG_ARGS --command 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"' $DB
+
+cd "{{pkg.path}}/schema" || exit
+sqitch --quiet --engine pg deploy "db:pg://${USER}:${PASS}@${HOST}/$DB"
diff --git a/src/bookshelf/habitat/config/sys.config b/src/bookshelf/habitat/config/sys.config
new file mode 100644
index 0000000000..3e0578e2ac
--- /dev/null
+++ b/src/bookshelf/habitat/config/sys.config
@@ -0,0 +1,147 @@
+%% -*- mode: erlang -*-
+%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ts=4 sw=4 ft=erlang et
+
+[
+ {kernel, [{inet_dist_use_interface, {127,0,0,1}}]},
+ %% SASL config
+ {sasl, [
+   {sasl_error_logger, {file, "{{pkg.svc_var_path}}/logs/sasl-error.log"}},
+   {errlog_type, error},
+   {error_logger_mf_dir, "{{pkg.svc_var_path}}/logs"}, % Log directory
+   {error_logger_mf_maxbytes, 104857600},
+   {error_logger_mf_maxfiles, 10}
+ ]},
+ {lager, [
+   %% What handlers to install with what arguments
+   %% The defaults for the logfiles are to rotate the files when
+   %% they reach 10Mb or at midnight, whichever comes first, and keep
+   %% the last 5 rotations. See the lager README for a description of
+   %% the time rotation format:
+   %% https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
+   %%
+   %% If you wish to disable rotation, you can either set the size to 0
+   %% and the rotation time to "", or instead specify a 2-tuple that only
+   %% consists of {Logfile, Level}.
+   {handlers, [
+     {lager_console_backend, [{{cfg.lager.lager_console_backend}}, {lager_default_formatter, [ "[", severity, "] ", message, "\n"]}]},
+     {lager_file_backend, [
+       {file, "{{pkg.svc_var_path}}/logs/error.log"},
+       {level, error},
+       {size, 104857600},
+       {date, "$D0"},
+       {count, 10}
+     ]},
+     {lager_file_backend, [
+       {file, "{{pkg.svc_var_path}}/logs/console.log"},
+       {level, info},
+       {size, 104857600},
+       {date, "$D0"},
+       {count, 10},
+       %% Removes the logging source, since currently
+       %% there's only one place in all of Bookshelf that
+       %% we log from. Also removes the PID from
+       %% the default message format.
+       {formatter_config, [date, " ", time, " [", severity, "] ", message, "\n"]}
+     ]}
+   ]},
+
+   %% Whether to write a crash log, and where.
+   %% Commented/omitted/undefined means no crash logger.
+   {crash_log, "{{pkg.svc_var_path}}/logs/crash.log"},
+
+   %% Maximum size in bytes of events in the crash log - defaults to 65536
+   {crash_log_msg_size, 65536},
+
+   %% Maximum size of the crash log in bytes, before it's rotated, set
+   %% to 0 to disable rotation - default is 0
+   {crash_log_size, 104857600},
+
+   %% What time to rotate the crash log - default is no time
+   %% rotation. See the lager README for a description of this format:
+   %% https://github.com/basho/lager/blob/master/README.org
+   {crash_log_date, "$D0"},
+
+   %% Number of rotated crash logs to keep, 0 means keep only the
+   %% current one - default is 0
+   {crash_log_count, 10},
+
+   %% Whether to redirect error_logger messages into lager - defaults to true
+   {error_logger_redirect, true},
+
+   %% Bump up the "high-water mark" (default 50), which is the
+   %% number of messages per second allowed to come from
+   %% error_logger. This is the same as used by
+   %% opscode-chef-mover, FWIW.
+   {error_logger_hwm, 1000}
+ ]},
+ {chef_secrets, [
+   {provider, chef_secrets_env}
+ ]},
+ {bookshelf, [{ip, "0.0.0.0"},
+              {port, {{cfg.port}} },
+              {storage_type, sql},
+              {disk_store, "{{pkg.svc_data_path}}"}, %% prob obsolete, revisit
+              {sql_retry_count, {{cfg.bookshelf.sql_retry_count}} },
+              {sql_retry_delay, {{cfg.bookshelf.sql_retry_delay}} },
+              {abandoned_upload_cleanup_interval, {{cfg.bookshelf.abandoned_upload_cleanup_interval}} },
+              {deleted_data_cleanup_interval, {{cfg.bookshelf.deleted_data_cleanup_interval}} },
+              {stream_download, {{cfg.bookshelf.stream_download}} },
+              {log_dir, "{{pkg.svc_var_path}}/log"}
+ ]},
+ {sqerl, [
+   {db_driver_mod, sqerl_pgsql_client},
+   {ip_mode, [ ipv4 ] },
+{{#if bind.database ~}}
+  {{#eachAlive bind.database.members as |member| ~}}
+    {{~#if @last}}
+   {db_host, "{{member.sys.ip}}"},
+   {db_port, {{member.cfg.port}} },
+   {db_user, "{{member.cfg.superuser_name}}"},
+   {db_pass, "{{member.cfg.superuser_password}}" },
+    {{/if ~}}
+  {{/eachAlive ~}}
+{{~else}}
+   {db_host, "{{cfg.sqerl.db_host}}"},
+   {db_port, {{cfg.sqerl.db_port}} },
+   {db_user, "{{cfg.sqerl.db_user}}"},
+   {db_pass, "{{cfg.sqerl.db_pass}}"},
+{{~/if}}
+   {db_name, "{{cfg.sqerl.db_name}}" },
+   {idle_check, 10000},
+   {pooler_timeout, {{cfg.sqerl.db_pooler_timeout}} },
+   {db_timeout, {{cfg.sqerl.sql_db_timeout}} },
+   {prepared_statements, {bksw_sql, statements, [pgsql]}},
+   {column_transforms,
+    [{<<"created_at">>,
+      {sqerl_transformers, convert_YMDHMS_tuple_to_datetime}},
+     {<<"updated_at">>,
+      {sqerl_transformers, convert_YMDHMS_tuple_to_datetime}}]
+   }
+ ]},
+ {pooler, [
+   {pools, [
+{{~#with cfg/pooler/sqerl }}
+     [{name, sqerl},
+      {max_count, {{max_count}} },
+      {init_count, {{init_count}} },
+      {queue_max, {{queue_max}} },
+      {start_mfa, {sqerl_client, start_link, []}}]
+{{~/with ~}}
+   ]}
+   %%,{metrics_module, folsom_metrics}
+ ]},
+ {webmachine, [
+   {log_handlers, [
+%%% Log handling is disabled, because it is redundant (nginx also logs requests)
+%%% If debug logging is needed, this can be uncommented to start logging somewhat verbose logs
+%%% {oc_wm_request_logger, [
+%%%   {file, "/var/logs/opscode/bookshelf/requests.log"},
+%%%   {file_size, 100},  %% Size in MB
+%%%   {files, 5},
+%%%   {annotations, [user, req_id]}
+%%%  ]
+%%% }
+   ]
+ }]}
].
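A note on the templating above: the {{#if bind.database}} / {{#eachAlive bind.database.members}} / {{#if @last}} pattern recurs throughout these configs. It renders the connection settings of the last live member of the bound postgresql service group and falls back to the static cfg.sqerl values when no bind is present. To see what a running service actually rendered, Habitat writes the result into the service's config directory; a minimal check (assuming the default /hab service layout) might be:

    # Inspect the rendered sys.config for the running bookshelf service
    cat /hab/svc/bookshelf/config/sys.config

    # Spot-check the db_host/db_port values picked up from the postgresql bind
    grep -E 'db_host|db_port' /hab/svc/bookshelf/config/sys.config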
diff --git a/src/bookshelf/habitat/config/veil-secrets.json b/src/bookshelf/habitat/config/veil-secrets.json
new file mode 100644
index 0000000000..cc9ade9211
--- /dev/null
+++ b/src/bookshelf/habitat/config/veil-secrets.json
@@ -0,0 +1,7 @@
+{{~ #if bind.chef-server-ctl}}
+  {{~ #eachAlive bind.chef-server-ctl.members as |member|}}
+    {{~ #if @last}}
+{{toJson member.cfg.secrets}}
+    {{~ /if}}
+  {{~ /eachAlive}}
+{{~ /if}}
diff --git a/src/bookshelf/habitat/config/vm.args b/src/bookshelf/habitat/config/vm.args
new file mode 100644
index 0000000000..de5e41d69f
--- /dev/null
+++ b/src/bookshelf/habitat/config/vm.args
@@ -0,0 +1,36 @@
+## Name of the node
+-name bookshelf@127.0.0.1
+
+## Cookie for distributed erlang
+-setcookie bookshelf
+
+## Add a directory to the code path where hot patches can be
+## placed and easily accessed.
+-pa lib/patches
+
+## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
+## (Disabled by default; use with caution!)
+##-heart
+
+## Enable kernel poll
++K true
+
+## Increase maximum IO ports from default (65536) to 196608
+## This change helps bookshelf cope with bursty
+## concurrent load
+## Once we are on R16, we should use: +Q 196608
+-env ERL_MAX_PORTS 196608
+
+## Increase maximum concurrent Erlang processes from
+## default (262144) to 524288
+## NOTE: This value MUST be a power of 2
+## N band (N - 1) == 0
++P 524288
+
+## Increase async worker thread pool size from
+## R15 default of 0 to R16 default of 10.
+## Async workers prevent IO from blocking the
+## schedulers and increase performance of IO-heavy
+## apps, like bookshelf.
++A 10
+
diff --git a/src/bookshelf/habitat/default.toml b/src/bookshelf/habitat/default.toml
new file mode 100644
index 0000000000..3fb2fb8552
--- /dev/null
+++ b/src/bookshelf/habitat/default.toml
@@ -0,0 +1,27 @@
+port=4321
+
+[lager]
+lager_console_backend="info"
+
+[bookshelf]
+listen_ip="0.0.0.0"
+listen_port=4321
+sql_retry_count=0
+sql_retry_delay=10
+abandoned_upload_cleanup_interval=1140000
+deleted_data_cleanup_interval=420000
+stream_download=true
+
+[sqerl]
+db_host="localhost"
+db_port=5432
+db_user="bookshelf"
+db_pass="default_password"
+db_name="bookshelf"
+db_pooler_timeout=2000
+sql_db_timeout=5000
+
+[pooler.sqerl]
+max_count=20
+init_count=20
+queue_max=200
diff --git a/src/bookshelf/habitat/hooks/init b/src/bookshelf/habitat/hooks/init
new file mode 100644
index 0000000000..e9c0b5c1a5
--- /dev/null
+++ b/src/bookshelf/habitat/hooks/init
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+exec 2>&1
+
+mkdir -p {{pkg.svc_var_path}}/logs
+
+source "{{pkg.svc_config_path}}/database-migrations.sh"
diff --git a/src/bookshelf/habitat/hooks/run b/src/bookshelf/habitat/hooks/run
new file mode 100644
index 0000000000..e86bbfcd38
--- /dev/null
+++ b/src/bookshelf/habitat/hooks/run
@@ -0,0 +1,10 @@
+#!/bin/bash
+exec 2>&1
+
+export HOME={{pkg.svc_var_path}}
+export RUNNER_LOG_DIR="{{pkg.svc_var_path}}/logs"
+export RELX_CONFIG_PATH="{{pkg.svc_config_path}}/sys.config"
+export VMARGS_PATH="{{pkg.svc_config_path}}/vm.args"
+export CHEF_SECRETS_DATA=$(cat {{pkg.svc_config_path}}/veil-secrets.json)
+
+exec bookshelf foreground
diff --git a/src/bookshelf/habitat/plan.sh b/src/bookshelf/habitat/plan.sh
new file mode 100644
index 0000000000..07731dc8a5
--- /dev/null
+++ b/src/bookshelf/habitat/plan.sh
@@ -0,0 +1,97 @@
+pkg_name=bookshelf
+pkg_origin=chef-server
+pkg_license=('Apache-2.0')
+pkg_maintainer="The Chef Server Maintainers "
+pkg_source="nosuchfile.tar.gz"
+pkg_deps=(
+  core/erlang18
+  core/cacerts
+ 
core/coreutils + core/gcc-libs + core/sqitch + chef-server/dbdpg +) +pkg_build_deps=(core/make core/git core/gcc) +pkg_lib_dirs=(lib) +pkg_include_dirs=(include) +pkg_bin_dirs=(bin) +pkg_description="Bookshelf is an S3 API compatible object store." +pkg_upstream_url="https://github.com/chef/chef-server" +pkg_exports=( + [port]="port" +) +pkg_exposes=(port) + +pkg_binds_optional=( + [database]="port" + [chef-server-ctl]="secrets" +) + +pkg_version() { + cat "$PLAN_CONTEXT/../../../VERSION" +} + +do_before() { + do_default_before + if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then + exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root." 56 + fi + update_pkg_version +} + +do_download() { + return 0 +} + +do_verify() { + return 0 +} + +do_unpack() { + # Copy everything over to the cache path so we don't write out our compiled + # deps into the working directory, but into the cache directory. + mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname" + cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname" +} + +do_prepare() { + # The `/usr/bin/env` path is hardcoded in jiffy, so we'll add a symlink since fix_interpreter won't work. + if [[ ! -r /usr/bin/env ]]; then + ln -sv "$(pkg_path_for coreutils)/bin/env" /usr/bin/env + _clean_env=true + fi + + # Need this for enterprise_ctl to compile with an old version of rebar + build_line "Setting PATH=$PATH:$HAB_CACHE_SRC_PATH/$pkg_dirname" + export PATH=$PATH:"$HAB_CACHE_SRC_PATH/$pkg_dirname" + + build_line "RELX_OUTPUT_DIR=$pkg_prefix" + export RELX_OUTPUT_DIR=$pkg_prefix + + git config --global http.sslCAInfo \ + "$(pkg_path_for core/cacerts)"/ssl/certs/cacert.pem + + fix_interpreter ./rebar3 core/coreutils bin/env +} + + +do_build() { + export REL_VERSION=$pkg_version + make omnibus +} + +do_install() { + cp -rv "_build/default/rel/bookshelf/"* "${pkg_prefix}" + cp -R "$HAB_CACHE_SRC_PATH/$pkg_dirname/schema" "$pkg_prefix" +} + +do_check() { + make +} + +do_end() { + # Clean up the `env` link, if we set it up. + if [[ -n "$_clean_env" ]]; then + rm -fv /usr/bin/env + fi +} diff --git a/src/chef-server-ctl/.gitignore b/src/chef-server-ctl/.gitignore new file mode 100644 index 0000000000..fbca225379 --- /dev/null +++ b/src/chef-server-ctl/.gitignore @@ -0,0 +1 @@ +results/ diff --git a/src/chef-server-ctl/Gemfile b/src/chef-server-ctl/Gemfile new file mode 100644 index 0000000000..40aff4eba3 --- /dev/null +++ b/src/chef-server-ctl/Gemfile @@ -0,0 +1,3 @@ +source "https://rubygems.org" + +gem 'toml' diff --git a/src/chef-server-ctl/Gemfile.local b/src/chef-server-ctl/Gemfile.local new file mode 100644 index 0000000000..8d9376af85 --- /dev/null +++ b/src/chef-server-ctl/Gemfile.local @@ -0,0 +1,2 @@ +gem 'chef' +gem 'json', '~>2.0' diff --git a/src/chef-server-ctl/Gemfile.lock b/src/chef-server-ctl/Gemfile.lock new file mode 100644 index 0000000000..22b8b15b1c --- /dev/null +++ b/src/chef-server-ctl/Gemfile.lock @@ -0,0 +1,17 @@ +GEM + remote: https://rubygems.org/ + specs: + blankslate (2.1.2.4) + parslet (1.5.0) + blankslate (~> 2.0) + toml (0.1.2) + parslet (~> 1.5.0) + +PLATFORMS + ruby + +DEPENDENCIES + toml + +BUNDLED WITH + 1.15.1 diff --git a/src/chef-server-ctl/habitat/bin/chef-server-ctl.sh b/src/chef-server-ctl/habitat/bin/chef-server-ctl.sh new file mode 100644 index 0000000000..e4cd7c764b --- /dev/null +++ b/src/chef-server-ctl/habitat/bin/chef-server-ctl.sh @@ -0,0 +1,33 @@ +#!/bin/bash -e +# +# Copyright 2012-2015 Chef Software, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+export SVWAIT=30
+# Ensure the calling environment (disapproval look Bundler) does not infect our
+# Ruby environment if private-chef-ctl is called from a Ruby script.
+unset RUBYOPT
+unset BUNDLE_BIN_PATH
+unset BUNDLE_GEMFILE
+unset GEM_PATH
+unset GEM_HOME
+
+# This uses a config file to find ourselves (and not hardcode our own package name)
+# Could do relative to $0, but that can be messy sometimes
+pkg_prefix=$(cat /hab/svc/chef-server-ctl/config/pkg_path)/omnibus-ctl
+cd $pkg_prefix
+bundler=$(hab pkg path "core/bundler")
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(hab pkg path "core/libffi")/lib
+export CHEF_SECRETS_DATA=$(cat /hab/svc/chef-server-ctl/config/hab-secrets-config.json)
+$bundler/bin/bundle exec chef-server-ctl opscode $pkg_prefix "$@"
diff --git a/src/chef-server-ctl/habitat/bin/knife-pivotal.sh b/src/chef-server-ctl/habitat/bin/knife-pivotal.sh
new file mode 100644
index 0000000000..a433dc0464
--- /dev/null
+++ b/src/chef-server-ctl/habitat/bin/knife-pivotal.sh
@@ -0,0 +1,6 @@
+#!/bin/bash -e
+
+# This uses a config file to find ourselves (and not hardcode our own package name)
+# Could do relative to $0, but that can be messy sometimes
+cd $(cat /hab/svc/chef-server-ctl/config/pkg_path)/chef
+$(hab pkg path "core/bundler")/bin/bundle exec bin/knife "$@" -c /hab/svc/chef-server-ctl/config/pivotal.rb
diff --git a/src/chef-server-ctl/habitat/bin/oc-chef-pedant.sh b/src/chef-server-ctl/habitat/bin/oc-chef-pedant.sh
new file mode 100644
index 0000000000..2e70dad89a
--- /dev/null
+++ b/src/chef-server-ctl/habitat/bin/oc-chef-pedant.sh
@@ -0,0 +1,9 @@
+#!/bin/bash -e
+
+export SUPERUSER_KEY=`sed ':a;N;$!ba;s/\n/\\n/g' /hab/svc/chef-server-ctl/config/pivotal.pem`
+export WEBUI_KEY=`sed ':a;N;$!ba;s/\n/\\n/g' /hab/svc/chef-server-ctl/config/webui_priv.pem`
+
+# This uses a config file to find ourselves (and not hardcode our own package name)
+# Could do relative to $0, but that can be messy sometimes
+cd $(cat /hab/svc/chef-server-ctl/config/pkg_path)/oc-chef-pedant
+$(hab pkg path "core/bundler")/bin/bundle exec bin/oc-chef-pedant --log-file /dev/null -c /hab/svc/chef-server-ctl/config/pedant_config.rb --focus smoke
diff --git a/src/chef-server-ctl/habitat/config/hab-secrets-config.json b/src/chef-server-ctl/habitat/config/hab-secrets-config.json
new file mode 100644
index 0000000000..4a2c25cb3b
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/hab-secrets-config.json
@@ -0,0 +1,3 @@
+{{~#with cfg}}
+{{~toJson secrets }}
+{{~/with}}
diff --git a/src/chef-server-ctl/habitat/config/hab-secrets-config.toml b/src/chef-server-ctl/habitat/config/hab-secrets-config.toml
new file mode 100644
index 0000000000..a1c26eea91
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/hab-secrets-config.toml
@@ -0,0 +1 @@
+{{toToml cfg}}
diff --git a/src/chef-server-ctl/habitat/config/pedant_config.rb b/src/chef-server-ctl/habitat/config/pedant_config.rb
new file mode 100644
index 0000000000..88b84d984e
--- /dev/null
+++
b/src/chef-server-ctl/habitat/config/pedant_config.rb
@@ -0,0 +1,201 @@
+# This annotated Pedant configuration file details the various
+# configuration settings available to you. It is separate from the
+# actual Pedant::Config class because not all settings have sane
+# defaults, and not all settings are appropriate in all situations.
+
+################################################################################
+#
+
+# The base_resource_url gets set whenever we are operating behind a proxy,
+# notably in the automate-chef-server all-in-one installation
+#
+# base_resource_url "host_header"
+
+# A unique identification string used to create orgs and users specific
+# to each node of a single chef server. Simply using "Process.pid"
+# proved not to be unique enough when running pedant simultaneously
+# on multiple nodes of the same chef server when the generated pedant
+# config file could have been copied across during the setup of that
+# chef server.
+chef_server_uid = "chef-server_#{Process.pid}".downcase
+
+# Specify a testing organization if you are testing a multi-tenant
+# instance of a Chef Server (e.g., Private Chef, Hosted Chef). If you
+# are testing a single-tenant instance (i.e. Open Source Chef Server),
+# DO NOT include this parameter
+#
+# Due to how the current org cache operates, it is best to use a
+# unique name for your testing organization. If you do not use a
+# unique name and run tests several times (destroying the organization
+# between runs) you will likely get inconsistent results.
+#
+# If you wish Pedant to create the organization for you at test time,
+# include the `:create_me => true` pair. If you wish to use an
+# existing organization for tests, you should supply a `:validator_key
+# => "/full/path/to/key.pem"` pair
+org({:name => "pedant_testorg_#{chef_server_uid}",
+     :create_me => true})
+
+validate_org_creation true
+
+# org({:name => "existing_org",
+#      :validator_key => "/etc/opscode/existing_org-validator.pem"})
+
+# account internal URL
+internal_account_url "http://127.0.0.1:9685"
+
+# If you want Pedant to delete the testing organization when it is
+# done, use this parameter. Note that this only has an effect if
+# Pedant also created the testing organization.
+delete_org true
+
+# You MUST specify the address of the server the API requests will be
+# sent to. Only specify protocol, hostname, and port.
+{{~#if bind.chef-server-nginx}}
+  {{~#eachAlive bind.chef-server-nginx.members as |member|}}
+    {{~#if @last}}
+chef_server "https://{{member.sys.ip}}:{{member.cfg.ssl-port}}"
+    {{~/if}}
+  {{~/eachAlive}}
+{{~else}}
+chef_server "https://{{cfg.chef_server_api.ip}}:{{cfg.chef_server_api.ssl_port}}"
+{{~/if}}
+
+# This configuration specifies the default orgname. Note that it does *not*
+# mean that Pedant will test things with default org urls. To do that,
+# pass --use-default-org on the command line
+default_orgname nil
+
+# If you are doing development testing, you can specify the address of
+# the Solr server. The presence of this parameter will enable tests
+# to force commits to Solr, greatly decreasing the amount of time
+# needed for testing the search endpoint. This is only an
+# optimization for development! If you are testing a "live" Chef
+# Server, or otherwise do not have access to the Solr server from your
+# testing location, you should not specify a value for this parameter.
+# The tests will still run, albeit slower, as they will now need to
+# poll for a period to ensure they are querying committed results.
+search_server "http://elasticsearch:9200/"
+
+search_commit_url "/_refresh"
+search_url_fmt "/chef/_search?q=X_CHEF_type_CHEF_X:%{type}%%20%{query}"
+
+# Some tests expect to access the erchef server directly, instead of routing
+# through the LB.
+#
+internal_server "http://oc_erchef:8000"
+
+
+# Related to the 'search_server' parameter, this specifies the maximum
+# amount of time (in seconds) that search endpoint requests should be
+# retried before giving up. If not explicitly set, it will default to
+# 65 seconds; only set it if you know that your Solr commit interval
+# differs significantly from this.
+maximum_search_time 65
+
+# We're starting to break tests up into groups based on different
+# criteria. The proper API tests (the results of which are viewable
+# to OPC customers) should be the only ones run by Pedant embedded in
+# OPC installs. There are other specs that help us keep track of API
+# cruft that we want to come back and fix later; these shouldn't be
+# viewable to customers, but we should be able to run them in
+# development and CI environments. If this parameter is missing or
+# explicitly `false`, only the customer-friendly tests will be run.
+#
+# This is mainly here for documentation purposes, since the
+# command-line `oc-chef-pedant` utility ultimately determines this
+# value.
+include_internal false
+
+# SSL protocol version to use for secure communications
+# with the load balancer
+ssl_version :TLSv1_2
+
+# Test users. The five users specified below are required; their
+# names (:user, :non_org_user, etc.) are indicative of their role
+# within the tests. All users must have a ':name' key. If they have
+# a ':create_me' key, Pedant will create these users for you. If you
+# are using pre-existing users, you must supply a ':key_file' key,
+# which should be the fully-qualified path /on the machine Pedant is
+# running on/ to a private key for that user.
+
+superuser_name 'pivotal'
+superuser_key '{{pkg.svc_config_path}}/pivotal.pem'
+webui_key '{{pkg.svc_config_path}}/webui_priv.pem'
+stats_user 'statsuser'
+
+suite "api"
+
+requestors({
+  :clients => {
+    :admin => {
+      :name => "pedant_admin_client_#{chef_server_uid}",
+      :create_me => true,
+      :create_knife => true,
+      :admin => true
+    },
+    :non_admin => {
+      :name => "pedant_client_#{chef_server_uid}",
+      :create_me => true,
+      :create_knife => true,
+    },
+    :bad => {
+      :name => "bad_client_#{chef_server_uid}",
+      :create_me => true,
+      :bogus => true
+    }
+  },
+
+  :users => {
+    # An administrator in the testing organization
+    :admin => {
+      :name => "pedant_admin_user_#{chef_server_uid}",
+      :create_me => true,
+      :create_knife => true,
+      :admin => true
+    },
+
+    :non_admin => {
+      :name => "pedant_user_#{chef_server_uid}",
+      :create_me => true,
+      :create_knife => true,
+      :admin => false
+    },
+
+    # A user that is not a member of the testing organization
+    :bad => {
+      :name => "pedant_nobody_#{chef_server_uid}",
+      :create_me => true,
+      :create_knife => true,
+      :associate => false
+    },
+  }
+})
+
+# To facilitate testing, we have added org creation validation tests.
+# These tests run before the main Pedant tests, so their output has been
+# suppressed. To make it easier to debug org creation, you can turn this
+# on and get the full output
+debug_org_creation false
+
+# The behavior of these features depends on erchef configuration
+search_acls? false
+
+old_runlists_and_search true
+
+# Default server api version for all requests that don't specify it.
+server_api_version 0
+
+# Actions enabled.
This allows tests that require actions to be bypassed if
+# actions aren't available.
+actions_enabled true
+
+# Enable/Disable tests if the required_recipe endpoint is turned on
+required_recipe_enabled false
+
+# Log HTTP Requests
+# log_file "/var/log/opscode/oc-chef-pedant/http-traffic.log"
+
+reindex_endpoint "https://127.0.0.1"
+
+chef_pgsql_collector true
diff --git a/src/chef-server-ctl/habitat/config/pivotal.pem b/src/chef-server-ctl/habitat/config/pivotal.pem
new file mode 100644
index 0000000000..2fa8522000
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/pivotal.pem
@@ -0,0 +1 @@
+{{cfg.secrets.chef-server.superuser_key}}
diff --git a/src/chef-server-ctl/habitat/config/pivotal.rb b/src/chef-server-ctl/habitat/config/pivotal.rb
new file mode 100644
index 0000000000..64ea40c928
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/pivotal.rb
@@ -0,0 +1,15 @@
+node_name "pivotal"
+{{~#if bind.chef-server-nginx}}
+  {{~#eachAlive bind.chef-server-nginx.members as |member|}}
+    {{~#if @last}}
+chef_server_url "https://{{member.sys.ip}}:{{member.cfg.ssl-port}}"
+chef_server_root "https://{{member.sys.ip}}:{{member.cfg.ssl-port}}"
+    {{~/if}}
+  {{~/eachAlive}}
+{{~else}}
+chef_server_url "https://{{cfg.chef_server_api.ip}}:{{cfg.chef_server_api.ssl_port}}"
+chef_server_root "https://{{cfg.chef_server_api.ip}}:{{cfg.chef_server_api.ssl_port}}"
+{{~/if}}
+no_proxy "127.0.0.1"
+client_key "{{pkg.svc_config_path}}/pivotal.pem"
+ssl_verify_mode :verify_none
diff --git a/src/chef-server-ctl/habitat/config/pkg_path b/src/chef-server-ctl/habitat/config/pkg_path
new file mode 100644
index 0000000000..4dea2ec414
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/pkg_path
@@ -0,0 +1 @@
+{{pkg.path}}
diff --git a/src/chef-server-ctl/habitat/config/secrets-bootstrap.rb b/src/chef-server-ctl/habitat/config/secrets-bootstrap.rb
new file mode 100644
index 0000000000..5158025e6d
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/secrets-bootstrap.rb
@@ -0,0 +1,103 @@
+#!{{pkgPathFor "core/ruby"}}/bin/ruby
+
+require 'toml'
+require 'openssl'
+require 'securerandom'
+
+REQUIRED_SECRETS = {
+  postgresql: {
+    db_superuser_password: { length: 100 }
+  },
+  redis_lb: {
+    password: { length: 100 }
+  },
+  rabbitmq: {
+    password: { length: 100 },
+    management_password: { length: 100 },
+    actions_password: { length: 100 }
+  },
+  keepalived: {
+    vrrp_instance_password: { length: 100 }
+  },
+  opscode_erchef: {
+    sql_password: { length: 60 },
+    sql_ro_password: { length: 60 },
+    stats_password: { length: 100 }
+  },
+  oc_bifrost: {
+    superuser_id: { length: 32 },
+    sql_password: { length: 100 },
+    sql_ro_password: { length: 100 }
+  },
+  oc_id: {
+    secret_key_base: { length: 100 },
+    sql_password: { length: 100 },
+    sql_ro_password: { length: 100 }
+  },
+  bookshelf: {
+    access_key_id: { length: 40 },
+    secret_access_key: { length: 80 },
+    sql_password: { length: 80 },
+    sql_ro_password: { length: 80 }
+  },
+  'chef-server': {
+    superuser_id: { length: 32 },
+    superuser_key: { length: 2048, type: 'rsa', private: true, pub_key_name: 'superuser_pub_key' },
+    superuser_pub_key: { type: 'rsa', private: false },
+    webui_key: { length: 2048, type: 'rsa', private: true, pub_key_name: 'webui_pub_key' },
+    webui_pub_key: { type: 'rsa', private: false }
+  },
+  'push-jobs-server': {
+    pushy_priv_key: { length: 2048, type: 'rsa', private: true, pub_key_name: 'pushy_pub_key' },
+    pushy_pub_key: { type: 'rsa', private: false },
+    sql_password: { length: 60 },
+    sql_ro_password: { length: 60 }
+  }
+}
+
+def 
secrets_apply_loop
+  toml_cfg = TOML.load_file('/hab/svc/chef-server-ctl/config/hab-secrets-config.toml')
+  new_secrets = Marshal.load(Marshal.dump(toml_cfg))
+  changes_to_apply = false
+
+  toml_cfg.each do |top_level_item, top_level_item_value|
+    # guard against iterating through non-secrets keys in default.toml
+    next unless top_level_item == 'secrets'
+    top_level_item_value.each do |service_item, service_item_value|
+      service_item_value.each do |key, pass|
+        # skip keys that have no generation rule in REQUIRED_SECRETS (e.g. data_collector/token),
+        # which would otherwise hit a nil lookup below
+        next unless REQUIRED_SECRETS.key?(service_item.to_sym) && REQUIRED_SECRETS[service_item.to_sym].key?(key.to_sym)
+        if pass.empty?
+          changes_to_apply = true
+          if REQUIRED_SECRETS[service_item.to_sym][key.to_sym].has_key?(:type) && REQUIRED_SECRETS[service_item.to_sym][key.to_sym][:type] == 'rsa'
+            priv_key = OpenSSL::PKey::RSA.generate(2048)
+            pub_key = OpenSSL::PKey::RSA.new(priv_key).public_key.to_s
+            if REQUIRED_SECRETS[service_item.to_sym][key.to_sym][:private]
+              new_secrets[top_level_item][service_item][key] = priv_key.to_s
+              pub_key_name = REQUIRED_SECRETS[service_item.to_sym][key.to_sym][:pub_key_name]
+              new_secrets[top_level_item][service_item][pub_key_name] = pub_key
+              puts "Updated Private/Public Keypair for #{service_item}/#{key}"
+            end
+          else
+            length = REQUIRED_SECRETS[service_item.to_sym][key.to_sym][:length].to_i
+            new_secrets[top_level_item][service_item][key] = SecureRandom.hex(length)[1..length]
+            puts "Updated Empty Key/Value: #{service_item}/#{key} #{new_secrets[top_level_item][service_item][key]}"
+          end
+        end
+      end
+    end
+  end
+
+  if changes_to_apply
+    puts "Changed secrets need to be applied."
+    File.write('{{pkg.svc_data_path}}/hab-secrets-modified.toml', TOML::Generator.new(new_secrets).body)
+    version = Time.now.getutc.to_i
+    system "hab config apply chef-server-ctl.default #{version} {{pkg.svc_data_path}}/hab-secrets-modified.toml"
+  else
+    puts "Secrets unchanged - nothing to do."
+  end
+end
+
+# forever loop
+loop do
+  secrets_apply_loop
+  sleep 15
+end
diff --git a/src/chef-server-ctl/habitat/config/webui_priv.pem b/src/chef-server-ctl/habitat/config/webui_priv.pem
new file mode 100644
index 0000000000..4c04bc62d6
--- /dev/null
+++ b/src/chef-server-ctl/habitat/config/webui_priv.pem
@@ -0,0 +1 @@
+{{cfg.secrets.chef-server.webui_key}}
diff --git a/src/chef-server-ctl/habitat/default.toml b/src/chef-server-ctl/habitat/default.toml
new file mode 100644
index 0000000000..c7131532b4
--- /dev/null
+++ b/src/chef-server-ctl/habitat/default.toml
@@ -0,0 +1,68 @@
+# Use this file to templatize your application's native configuration files.
+# See the docs at https://www.habitat.sh/docs/create-packages-configure/.
+# You can safely delete this file if you don't need it.
+#
+
+[chef_server_api]
+ip = "172.18.0.1"
+port = "80"
+ssl_port = "443"
+
+# the secrets namespace is initially initialized to empty strings;
+# the idea is that we loop through all of these and generate them at
+# "bootstrap" time and then re-inject them into the config
+
+# NOTE: Additional keys also have to be added to the ruby hash in config/secrets-bootstrap.rb
+
+[secrets]
+
+[secrets.postgresql]
+db_superuser_password = ""
+
+[secrets.redis_lb]
+password = ""
+
+[secrets.rabbitmq]
+password = ""
+management_password = ""
+actions_password = ""
+
+[secrets.keepalived]
+vrrp_instance_password = ""
+
+[secrets.opscode_erchef]
+sql_password = ""
+sql_ro_password = ""
+stats_password = ""
+
+[secrets.oc_bifrost]
+superuser_id = ""
+sql_password = ""
+sql_ro_password = ""
+
+[secrets.oc_id]
+secret_key_base = ""
+sql_password = ""
+sql_ro_password = ""
+
+[secrets.bookshelf]
+access_key_id = ""
+secret_access_key = ""
+sql_password = ""
+sql_ro_password = ""
+
+[secrets.chef-server]
+superuser_id = ""
+superuser_key = ""
+superuser_pub_key = ""
+webui_key = ""
+webui_pub_key = ""
+
+[secrets.push-jobs-server]
+sql_password = ""
+sql_ro_password = ""
+pushy_priv_key = ""
+pushy_pub_key = ""
+
+[secrets.data_collector]
+token = ""
diff --git a/src/chef-server-ctl/habitat/hooks/run b/src/chef-server-ctl/habitat/hooks/run
new file mode 100644
index 0000000000..619104a370
--- /dev/null
+++ b/src/chef-server-ctl/habitat/hooks/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+exec 2>&1
+
+{{#if cfg.fips_enabled ~}}
+export OPENSSL_FIPS=1
+{{/if ~}}
+
+cd {{pkg.path}} && bundle exec ruby "{{pkg.svc_config_path}}/secrets-bootstrap.rb"
diff --git a/src/chef-server-ctl/habitat/plan.sh b/src/chef-server-ctl/habitat/plan.sh
new file mode 100644
index 0000000000..0ab203e4c3
--- /dev/null
+++ b/src/chef-server-ctl/habitat/plan.sh
@@ -0,0 +1,158 @@
+pkg_name=chef-server-ctl
+pkg_origin=chef-server
+pkg_maintainer="The Chef Server Maintainers "
+pkg_license=('Apache-2.0')
+# pkg_source="http://some_source_url/releases/${pkg_name}-${pkg_version}.tar.gz"
+# pkg_filename="${pkg_name}-${pkg_version}.tar.gz"
+# pkg_shasum="TODO"
+pkg_deps=(
+  core/coreutils
+  core/curl
+  core/jq-static
+  core/ruby
+  core/bundler
+  core/hab-butterfly
+  core/postgresql
+)
+pkg_build_deps=(
+  core/coreutils
+  core/glibc
+  core/git
+  core/diffutils
+  core/patch
+  core/make
+  core/gcc
+)
+pkg_lib_dirs=(lib)
+pkg_include_dirs=(include)
+pkg_bin_dirs=(bin)
+pkg_exports=(
+  [secrets]=secrets
+)
+pkg_binds_optional=(
+  [chef-server-nginx]="port ssl-port"
+)
+# pkg_exposes=(port ssl-port)
+# pkg_binds=(
+#   [database]="port host"
+# )
+pkg_interpreters=(bin/bash)
+pkg_svc_user="hab"
+pkg_svc_group="$pkg_svc_user"
+pkg_description="Some description."
+
+pkg_version() {
+  cat "$PLAN_CONTEXT/../../../VERSION"
+}
+
+do_before() {
+  do_default_before
+  if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then
+    exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root." 56
+  fi
+  update_pkg_version
+}
+
+do_download() {
+  return 0
+}
+
+do_verify() {
+  return 0
+}
+
+do_unpack() {
+  # Copy everything over to the cache path so we don't write out our compiled
+  # deps into the working directory, but into the cache directory.
+  mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname"
+  cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname"
+}
+
+do_prepare() {
+  return 0
+}
+
+do_build() {
+  return 0
+}
+
+do_install() {
+  # install gem dependencies for service hooks directly under $pkg_prefix
+  export HOME="${pkg_prefix}"
+  export RUBY_VENDOR="${pkg_prefix}/vendor/bundle"
+  bundle install --path ${RUBY_VENDOR} --binstubs
+  bundle config path ${RUBY_VENDOR}
+
+  cp Gemfile* ${pkg_prefix}
+
+  # install oc-chef-pedant in its own directory under $pkg_prefix
+  export pedant_src_dir=$(abspath $PLAN_CONTEXT/../../../oc-chef-pedant)
+  if [ ! -d "${pedant_src_dir}" ]; then
+    exit_with "Cannot find oc-chef-pedant src directory. You must run \"hab studio enter\" from the chef-server project root." 56
+  fi
+  cp -pr ${pedant_src_dir} ${pkg_prefix}
+  export pedant_dir="${pkg_prefix}/oc-chef-pedant"
+
+  # TODO: declare chef gem dependency in oc-chef-pedant
+  cp Gemfile.local "${pedant_dir}/Gemfile.local"
+
+  # in pedant dir bundle install
+  pushd ${pedant_dir}
+  bundle install --path ${RUBY_VENDOR}
+  bundle config path ${RUBY_VENDOR}
+  popd
+
+  # in chef dir bundle install
+  export chef_dir="${pkg_prefix}"/chef
+  mkdir $chef_dir
+  pushd $chef_dir
+
+  cat > Gemfile << EOF
+source 'https://rubygems.org'
+gem 'chef'
+gem 'knife-opc'
+EOF
+
+  bundle install --path ${RUBY_VENDOR} --binstubs
+  bundle config path ${RUBY_VENDOR}
+
+  cp $PLAN_CONTEXT/bin/oc-chef-pedant.sh $pkg_prefix/bin/chef-server-test
+  chmod +x $pkg_prefix/bin/chef-server-test
+
+  cp $PLAN_CONTEXT/bin/knife-pivotal.sh $pkg_prefix/bin/knife
+  chmod +x $pkg_prefix/bin/knife
+
+  popd
+
+  #
+  # Chef-server-ctl install
+  echo "====== BUILDING CHEF_SERVER_CTL ==== "
+  echo $PLAN_CONTEXT $pkg_prefix
+  export omnibus_ctl_dir="${pkg_prefix}"/omnibus-ctl
+
+  cp -R ../../omnibus/files/private-chef-ctl-commands $omnibus_ctl_dir
+  install $PLAN_CONTEXT/bin/chef-server-ctl.sh $pkg_prefix/bin/chef-server-ctl
+  fix_interpreter $pkg_prefix/omnibus-ctl/chef-server-ctl core/ruby bin/ruby
+
+  pushd ${omnibus_ctl_dir}
+  bundle install --path ${RUBY_VENDOR} --binstubs
+  bundle config path ${RUBY_VENDOR}
+  popd
+
+  # We install a bunch of stuff (specifically chef in private-chef-ctl) from git
+  find ${RUBY_VENDOR} -name .git | xargs rm -rf
+  rm -rf "${HOME}/.bundle/cache"
+  rm -rf ${RUBY_VENDOR}/ruby/*/cache
+
+}
+
+do_check() {
+  return 0
+}
+
+do_end() {
+  # Clean up the `env` link, if we set it up.
+  if [[ -n "$_clean_env" ]]; then
+    rm -fv /usr/bin/env
+  fi
+}
diff --git a/src/dbdpg/README.md b/src/dbdpg/README.md
new file mode 100644
index 0000000000..1d122e5550
--- /dev/null
+++ b/src/dbdpg/README.md
@@ -0,0 +1,3 @@
+# DBD-Pg
+
+This directory contains a Habitat plan for [DBD-Pg](http://search.cpan.org/dist/DBD-Pg/), which is needed to run Sqitch deploys to PostgreSQL (for example, the bookshelf schema migrations in this repository).
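For context on how these plans get built: the repo's own habitat_pkgs_build.sh (earlier in this diff) loops over the plan directories under /src/src inside the studio and calls build on each. A by-hand equivalent for this plan alone might look like the following sketch (the /src/src path mirrors that script; adjust it if your studio mounts the checkout elsewhere):

    # From the chef-server project root
    hab studio enter

    # Inside the studio: build just the dbdpg plan
    cd /src/src/dbdpg
    build

    # The freshly built .hart archive lands under ./results/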
diff --git a/src/dbdpg/habitat/plan.sh b/src/dbdpg/habitat/plan.sh new file mode 100644 index 0000000000..131859415d --- /dev/null +++ b/src/dbdpg/habitat/plan.sh @@ -0,0 +1,45 @@ +pkg_name=dbdpg +pkg_origin=chef-server +pkg_version="3.5.3" +pkg_maintainer="The Chef Automate Maintainers " +pkg_license=('Artistic-1.0-Perl' 'GPL-2.0') +pkg_source=nosuchfile.tgz +pkg_shasum=7e98a9b975256a4733db1c0e974cad5ad5cb821489323e395ed97bd058e0a90e +pkg_deps=( + core/glibc + core/perl + core/postgresql + core/zlib +) +pkg_build_deps=( + core/cpanminus + core/local-lib + core/gcc + core/make +) +pkg_lib_dirs=(lib/perl5/x86_64-linux-thread-multi) +pkg_description="DBD::Pg is a Perl module that works with the DBI module to provide access to PostgreSQL databases." +pkg_upstream_url="http://search.cpan.org/dist/DBD-Pg/" + +do_download() { + return 0 +} + +do_unpack() { + return 0 +} + +do_verify() { + return 0 +} + +do_build() { + return 0 +} + +do_install() { + source <(perl -I"$(pkg_path_for core/local-lib)/lib/perl5" -Mlocal::lib="$(pkg_path_for core/local-lib)") + source <(perl -I"$(pkg_path_for core/cpanminus)/lib/perl5" -Mlocal::lib="$(pkg_path_for core/cpanminus)") + source <(perl -Mlocal::lib="$pkg_prefix") + cpanm "DBD::Pg@$pkg_version" --local-lib "$pkg_prefix" +} diff --git a/src/nginx/habitat/config/chef_http_lb.conf b/src/nginx/habitat/config/chef_http_lb.conf new file mode 100644 index 0000000000..6e6c61cef7 --- /dev/null +++ b/src/nginx/habitat/config/chef_http_lb.conf @@ -0,0 +1,3 @@ +{{~#with cfg/http}} +{{~> chef_http_lb_common }} +{{~/with}} diff --git a/src/nginx/habitat/config/chef_http_lb_common b/src/nginx/habitat/config/chef_http_lb_common new file mode 100644 index 0000000000..8bdc118a1c --- /dev/null +++ b/src/nginx/habitat/config/chef_http_lb_common @@ -0,0 +1,151 @@ + {{!-- This file is a partial and is only rendered because habitat doesn't know the difference --}} +# This portion comes from chef_http_lb_common via {{parent}} + server { + listen {{port}}; + server_name api.chef-server.dev; + + {{#if ../bind.chef-server-ctl ~}} + {{#eachAlive ../bind.chef-server-ctl.members as |member| ~}} + {{#if @last ~}} + set_by_lua_block $data_collector_token { + return "{{member.cfg.secrets.data_collector.token}}"; + } + {{/if ~}} + {{/eachAlive ~}} + {{else ~}} + set_by_lua $data_collector_token 'return os.getenv("DATA_COLLECTOR_TOKEN")'; + {{~/if}} + + access_log stdout opscode; +{{~#if is_ssl}} + ssl on; + ssl_certificate {{../pkg.svc_data_path}}/ca/{{../cfg.server_name}}.cert; + ssl_certificate_key {{../pkg.svc_data_path}}/ca/{{../cfg.server_name}}.key; + ssl_dhparam {{../pkg.svc_data_path}}/ca/dhparams.pem; + + ssl_session_timeout 5m; + + ssl_protocols {{../cfg.ssl_protocols}}; + ssl_ciphers {{../cfg.ssl_ciphers}}; + ssl_prefer_server_ciphers on; +{{~/if}} + root {{../pkg.path}}/static/html; + client_max_body_size 250m; + + proxy_set_header Host $http_host; # http_host preserves port # if present + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; +{{~#if is_ssl}} + proxy_set_header X-Forwarded-Proto https; +{{~else}} + proxy_set_header X-Forwarded-Proto http; +{{~/if}} + proxy_pass_request_headers on; + proxy_connect_timeout 1; + proxy_send_timeout 300; + proxy_read_timeout 300; + + error_page 404 =404 /404.html; + error_page 503 =503 /503.json; + + # Whitelist the docs necessary to serve up error pages and friendly + # html to non-chef clients hitting this host. 
+ location ~ "^/[0-9]{3,3}\.(json|html)|favicon.ico|index.html$" { + } + location "/css/" { + } + location "/images/" { + } + + location /version { + types { } + default_type text/plain; + alias {{../pkg.svc_config_path}}/version-manifest.txt; + } + + location ~ "^/organizations/([^/]+)/validate" { + allow 127.0.0.1; + allow ::1; + deny all; + proxy_pass http://opscode_erchef; + } + +{{#if ../bind.oc_erchef ~}} + {{#eachAlive ../bind.oc_erchef.members as |member| ~}} + {{#if @last ~}} + {{#if member.cfg.data_collector_enabled ~}} + location ~ "^/organizations/([^/]+)/data-collector$" { + set $request_org $1; + access_by_lua_block { validator.validate("POST") } + proxy_set_header x-data-collector-token $data_collector_token; + proxy_set_header x-data-collector-auth "version=1.0"; + rewrite ^ /data-collector/v0/ break; + proxy_pass https://data-collector; + } + {{/if ~}} + {{/if ~}} + {{/eachAlive ~}} +{{~/if}} + + location ~ "^/organizations/([^/]+)/required_recipe$" { + # This endpoint is unique because it is defined via nginx and is not + # handled by an upstream like oc_erchef. In order to make responses + # consistent between Chef server and chef-zero we'll always enable + # the location but explicitly return a 404 when the feature is disabled, + # rather than leave it undefined. If we were to leave it undefined any + # non-signed requests would be routed to the main index page and return + # a 200 instead of 404. + return 404; + } + + + # bookshelf + location ~ "^/bookshelf/organization-.+" { + proxy_pass http://bookshelf; + } + + # erchef status endpoint + location ~ "^/_status/?$" { + types { } + default_type application/json; + proxy_pass http://opscode_erchef; + } + + # erchef stats endpoint + location ~ "^/_stats/?$" { + auth_basic "Chef Server Admin Stats"; + # HABITAT: CHECK maybe store in ring + auth_basic_user_file {{../pkg.svc_config_path}}/stats_htpasswd; + types { } + default_type application/json; + proxy_pass http://opscode_erchef; + } + + # This variable is set to an empty string here so it can be used in + # dispatch.lua later on. An add-on can set this variable to be used as an + # upstream if we determine the request was not intended to go to the API. 
+ set $add_on_override_upstream ""; + + # Include external routes for addons + # HABITAT: NO ADDONS YET + # include /var/opt/opscode/nginx/etc/addon.d/*_external.conf; + + # oc-id + include {{../pkg.svc_config_path}}/oc-id-external.conf; + + location /_route/ { + default_type 'application/json'; + content_by_lua_file '{{../pkg.svc_config_path}}/dispatch_route.lua'; + } + + location / { + satisfy any; + + + set $mode "api"; + set $upstream ""; + rewrite_by_lua_file '{{../pkg.svc_config_path}}/dispatch.lua'; + proxy_pass http://$upstream; + proxy_redirect http://$upstream /; + } + } diff --git a/src/nginx/habitat/config/chef_https_lb.conf b/src/nginx/habitat/config/chef_https_lb.conf new file mode 100644 index 0000000000..94f589b73d --- /dev/null +++ b/src/nginx/habitat/config/chef_https_lb.conf @@ -0,0 +1,3 @@ +{{~#with cfg/https}} +{{~> chef_http_lb_common }} +{{~/with}} diff --git a/src/nginx/habitat/config/config.lua b/src/nginx/habitat/config/config.lua new file mode 100755 index 0000000000..4623ceab82 --- /dev/null +++ b/src/nginx/habitat/config/config.lua @@ -0,0 +1,218 @@ +-- this script exposes get_org_config which returns an array of +-- darklaunch configuration maps retrieved from redis +module("config", package.seeall) + +-- NOTE: assumption that the following is defined globally: +-- redis = require("resty/redis") + +local redis_password = os.getenv("REDIS_PASSWORD") + +local failsafe_config = {} +failsafe_config["dl_default"] = {} +failsafe_config["dl_default"][1] = "503_mode" +failsafe_config["dl_default"][2] = false +failsafe_config["dl_default"][3] = "couchdb_containers" +failsafe_config["dl_default"][4] = false +failsafe_config["dl_default"][5] = "couchdb_groups" +failsafe_config["dl_default"][6] = false +failsafe_config["dl_default"][7] = "couchdb_acls" +failsafe_config["dl_default"][8] = false +failsafe_config["dl_default"][9] = "couchdb_association_requests" +failsafe_config["dl_default"][10] = false +failsafe_config["dl_default"][11] = "couchdb_organizations" +failsafe_config["dl_default"][12] = false +failsafe_config["dl_default"][13] = "couchdb_associations" +failsafe_config["dl_default"][14] = false + +-- Client must set these via set_ban_refresh_interval +-- and set_maint_refresh_interval. +maint_refresh_interval = nil +ban_refresh_interval = nil +default_org = false + +local function b_to_i(v) + -- these values are coming back as a string from redis. Handle both scenarios + -- so that we don't break if our redis interface gets smarter down the road. + if v == "true" or v == true then + return 1 + elseif v == "false" or v == false then + return 0 + else + return v -- preserve string values + end +end + +local function array_to_hash(t) + local h = {} + for i = 1, #t, 2 do + h[t[i]] = t[i + 1] + end + return h +end + +local function auth_not_required(err) + -- conservatively try to determine if auth is not required. 
-- if auth isn't required, we can safely ignore an error from
+  -- the AUTH command
+  return err == "ERR Client sent AUTH, but no password is set"
+end
+
+local function connect_redis()
+  local red = redis:new()
+  red:set_timeout(1000)
+  local ok, err = red:connect("127.0.0.1", 16379)
+
+  if not ok then
+    ngx.log(ngx.ERR, "failed to connect redis: ", err)
+  end
+
+  if redis_password == nil then
+    ngx.log(ngx.ERR, "REDIS_PASSWORD not found in the environment")
+    ok = false
+  else
+    local ok, err = red:auth(redis_password)
+    if not ok then
+      if auth_not_required(err) then
+        ok = true
+      else
+        ngx.log(ngx.ERR, "failed to authenticate to redis: ", err)
+      end
+    end
+  end
+  return ok, red
+end
+
+local function close_redis(red)
+  local ok, err = red:set_keepalive(2000, 250)
+  if not ok then
+    ngx.log(ngx.ERR, "Failed to set keepalive: " .. err)
+  end
+end
+
+-- Get configuration information for default/orgname/override in pipelined fashion;
+-- return failsafe defaults if a redis error occurs, otherwise an array of results.
+local function redis_pipelined_get_config(red, orgname)
+  red:init_pipeline()
+  red:hgetall("dl_default")
+  red:hgetall("dl_org_" .. orgname)
+  red:hgetall("dl_override")
+  local results, err = red:commit_pipeline()
+  if not results then
+    ngx.log(ngx.ERR, "failed to commit the pipelined requests: " .. err )
+    results = failsafe_config
+  end
+  return results
+end
+
+local function redis_fetch_set(set_name)
+  local result = {}
+  local ok, red = connect_redis()
+  if ok then
+    result, err = red:smembers(set_name);
+    if err then
+      ngx.log(ngx.ERR, "Redis read error retrieving " .. set_name .. ": " .. err);
+      ok = false
+    else
+      close_redis(red)
+    end
+  end
+  return ok, result
+end
+
+-- Connect to redis and retrieve configuration for this org.
+local function get_org_config(orgname)
+  -- Habitat doesn't support darklaunch
+  results = failsafe_config
+  return results
+end
+
+-- Examines the shared_dict provided and determines if it needs updating
+-- based on expiry time. If so, it clears all entries and refreshes
+-- the dict data from redis
+local function refresh_expiring_set(shared_dict, name, interval)
+  local updated_at = shared_dict:get("updated_at");
+  if updated_at == nil or (ngx.now() - updated_at) >= interval then
+    ok, updated_data = redis_fetch_set(name)
+    if ok then
+      shared_dict:flush_all();
+      for index, key in ipairs(updated_data) do
+        shared_dict:set(key, true);
+      end
+      shared_dict:set("updated_at", ngx.now());
+    end
+  end
+end
+
+-- return true if our systems are in maintenance mode
+-- and the remote address is not excluded from maintenance mode
+-- will refresh maint-mode state and whitelisted IPs periodically.
+function config.is_in_maint_mode_for_addr(remote_addr)
+  -- Stub out for habitat
+  -- local maint = ngx.shared.maint_data
+  -- refresh_expiring_set(maint, "maint_data", maint_refresh_interval)
+  -- return maint:get("maint_mode")
+  return false
+end
+
+-- This does not attempt to refresh the maintenance mode data, as it's
+-- intended to be invoked only after maint data has been loaded/refreshed earlier in the flow
+function config.is_route_in_maint_mode(route_id)
+  -- Stub out for habitat
+  -- local maint = ngx.shared.maint_data
+  -- return maint:get("maint_mode_" .. route_id)
+  return false
+end
+
+function config.is_route_darklaunched(route_id)
+  -- Stub out for habitat
+  -- local maint = ngx.shared.maint_data
+  -- return maint:get("dl_" .. 
route_id)
+  return false
+end
+
+-- return true if the given address is in the maintenance mode
+-- whitelist
+function config.is_addr_whitelisted(component, remote_addr)
+  return false
+end
+
+
+function config.is_addr_banned(remote_addr)
+  return false
+end
+
+-- Get 'raw' org configuration - nested array of darklaunch rules
+function config.raw_org_config(org)
+  local results = get_org_config(org)
+  local c = {}
+  local i = 1
+  for index, result in pairs(results) do
+    c[i] = {}
+    for k, v in pairs(array_to_hash(result)) do c[i][k] = v end
+    i = i + 1
+  end
+  return c
+end
+
+-- Get org configuration and darklaunch rules
+function config.org_config(org)
+  local results = get_org_config(org or "_OC_INTERNAL_NO_ORG")
+  -- merge results into a single table, with later key values overwriting earlier ones.
+  local org_config = {}
+  for index, result in pairs(results) do
+    for k, v in pairs(array_to_hash(result)) do org_config[k] = b_to_i(v) end
+  end
+  return org_config
+end
+
+function set_ban_refresh_interval(interval)
+  ban_refresh_interval = interval
+end
+
+function set_maint_refresh_interval(interval)
+  maint_refresh_interval = interval
+end
+
+function set_default_org(org)
+  default_org = org
+end
diff --git a/src/nginx/habitat/config/dispatch.lua b/src/nginx/habitat/config/dispatch.lua
new file mode 100755
index 0000000000..f5aa4064ec
--- /dev/null
+++ b/src/nginx/habitat/config/dispatch.lua
@@ -0,0 +1,106 @@
+-- Note: these modules are loaded during server init, since they're
+-- used in every request:
+--   routes = routes.lua
+--   config = config.lua
+--   resolver = resolver.lua
+
+local mode = ngx.var.mode
+local internal = false
+local min_version = 10
+
+local remote_addr = ngx.var.remote_addr
+
+
+-- Before we go any further, check banned IPs
+if config.is_addr_banned(remote_addr) then
+  ngx.exit(ngx.HTTP_FORBIDDEN)
+end
+
+-- If chef-user is missing then it's presumably not a chef client.
+local userid = ngx.var.http_x_ops_userid or ""
+local uri = ngx.var.uri
+
+-- A couple of early-exit validations that are specific
+-- to an api vhost:
+if mode == "api" then
+  -- If we've defined an override upstream and we don't have a userid, use
+  -- the override upstream.
+  local override_upstream = ngx.var.add_on_override_upstream
+  if userid == "" and override_upstream ~= "" then
+    ngx.var.upstream = override_upstream
+    return
+  end
+
+  -- Internal API does not validate chef version
+  if not internal then
+    -- Exit early: If they don't have the right chef version, send them packing.
+    local version = ngx.var.http_x_chef_version
+    if version then
+      if not routes.is_client_version_valid(version, min_version) then
+        ngx.status = ngx.HTTP_BAD_REQUEST
+        ngx.say('{"error": "400 - Bad Request: Chef Client version ' ..
+                min_version .. ' or higher required. Your version of Chef is ' ..
+                version .. '."}')
+        ngx.exit(ngx.HTTP_OK)
+      end
+    else
+      -- the request did not originate with a chef client, so we'll give something
+      -- more friendly to web browsers.
+      return ngx.exec("/index.html")
+    end
+  end
+
+  -- Exit early: If they don't have a chef user id we don't want them here.
+  -- Only API servers (both internal and external) verify that the user id is present.
+  if userid == "" then
+    ngx.exit(ngx.HTTP_UNAUTHORIZED)
+  end
+end
+
+
+-- global maint mode && address NOT excluded?
no can do, muchacho +if config.is_in_maint_mode_for_addr(remote_addr) then + ngx.exit(ngx.HTTP_SERVICE_UNAVAILABLE) +end + +-- Parse the URI to determine that it points to a valid destination +-- and extract necessary components to move forward. +local route = routes.resolve_uri(mode, uri) + +-- If we don't have a route identifier it means we couldn't match the url +-- to a valid path. +if not route.route_id then + ngx.exit(ngx.HTTP_NOT_FOUND) +end + +-- Load restrictions and darklaunch constraints for this org. +-- note that org name may not be valid - we'll use an appropriate default if it's not, +-- such as when the URI is valid but does not contain an org name. +route.org_config = config.org_config(route.org_name, mode) + +-- API mode means that internal routing rules do not apply, so only flag as internal if +-- we're not in api mode +if not (mode == "api") then + route.internal = internal +end + +-- All remaining checks are done with the full request context that we've built +-- Any non-zero response indicates an http response code that we must terminate with +-- immediately. +local response = route_checks.run(route) +if (response > 0) then + ngx.exit(response) +end + +-- At long last! Against all odds, they're cleared to move onto +-- their final destination. Let's figure out what that is, pack +-- them up with a shiny new set of darklaunch headers, +-- and send them on their way +ngx.var.upstream = resolver.determine_upstream(route) + +local dl_header = "" + +for k, v in pairs(route.org_config) do dl_header = dl_header .. k .. "=" .. v .. ";" end +ngx.req.set_header("X-Ops-DarkLaunch", dl_header) + + diff --git a/src/nginx/habitat/config/dispatch_route.lua b/src/nginx/habitat/config/dispatch_route.lua new file mode 100755 index 0000000000..87ef84b970 --- /dev/null +++ b/src/nginx/habitat/config/dispatch_route.lua @@ -0,0 +1,40 @@ +-- This is for use with the internal /_routes endpoints +local cjson = require("cjson") +cjson.encode_keep_buffer(false) +args = ngx.req.get_uri_args() + +-- Let's go ahead pull the meat out of the url - easy, since we're guaranteed that our URI +-- is prefixed with "/_route/" +local uri = string.sub(ngx.var.uri, 8) +local route = routes.resolve_uri("api", uri) +if not route.route_id then + ngx.say('{"error": "no route exists for the uri ' .. uri .. '"}') + ngx.exit(ngx.HTTP_OK) +end + +route.org_config = config.org_config(route.org_name) +upstream = resolver.determine_upstream(route) +-- +-- construct a table with response data, then serialize to json +local json = {} +-- Start out with the basics +json['org_name'] = route.org_name +json['upstream_target'] = upstream +json['uri'] = uri +json['route'] = route.route_id +json['endpoint'] = route.endpoint +json['object_name'] = route.object_name or "" +json['config'] = {} +json['config']['merged'] = route.org_config + +-- debug config - include merged and default/org/override config. +if args['all'] == "1" then + local raw = config.raw_org_config(org_name) + json['config']['default'] = raw[1] + json['config']['org'] = raw[2] + json['config']['override'] = raw[3] +end + +-- Send it back and done. 
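+-- As a hedged illustration (values are hypothetical, not captured from a
+-- running system): GET /_route/organizations/acme/nodes/web1 would yield a
+-- body shaped roughly like
+--   {"org_name":"acme","route":"erchef","endpoint":"nodes","object_name":"web1",
+--    "uri":"/organizations/acme/nodes/web1","upstream_target":"opscode_erchef",
+--    "config":{"merged":{...}}}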
+ngx.say(cjson.encode(json))
+ngx.exit(ngx.HTTP_OK)
diff --git a/src/nginx/habitat/config/nginx.conf b/src/nginx/habitat/config/nginx.conf
new file mode 100644
index 0000000000..1233607e4f
--- /dev/null
+++ b/src/nginx/habitat/config/nginx.conf
@@ -0,0 +1,139 @@
+worker_processes 4;
+error_log stderr;
+# Environment variables that we wish to access while running must be declared here,
+# otherwise they will not be available.
+# env REDIS_PASSWORD; # Not used in habitat chef server
+env DATA_COLLECTOR_TOKEN;
+daemon off;
+
+events {
+  worker_connections 1024;
+}
+
+http {
+  log_format opscode '$remote_addr - $remote_user [$time_local]  '
+                    '"$request" $status "$request_time" $body_bytes_sent '
+                    '"$http_referer" "$http_user_agent" "$upstream_addr" "$upstream_status" "$upstream_response_time" "$http_x_chef_version" "$http_x_ops_sign" "$http_x_ops_userid" "$http_x_ops_timestamp" "$http_x_ops_content_hash" $request_length';
+
+  server_names_hash_bucket_size 128;
+
+  sendfile on;
+  tcp_nopush on;
+  tcp_nodelay on;
+
+  keepalive_timeout 65;
+
+  gzip on;
+  gzip_http_version 1.0;
+  gzip_comp_level 2;
+  gzip_proxied any;
+  gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript application/json;
+
+  include {{pkgPathFor "chef-server/openresty-noroot"}}/nginx/conf/mime.types;
+
+  ## TODO Maybe put this back into a subdir
+  lua_package_path "{{pkg.svc_config_path}}/?.lua;$prefix/?.lua;;";
+  lua_shared_dict maint_data 1m;
+  lua_shared_dict banned_ips 1m;
+
+  # Preload some shared modules globally
+  init_by_lua '
+-- redis = require("resty/redis") # Not used in habitat chef server
+    config = require("config")
+    routes = require("routes")
+    resolver = require("resolver")
+    route_checks = require("route_checks")
+    validator = require("validator")
+
+-- Habitat doesn't support redis-based dark launch and ban for now
+--    config.set_maint_refresh_interval(600)
+--    config.set_ban_refresh_interval(600)
+--    config.set_default_org(false)
+  ';
+
+  # Listen for local requests to the stub_status module.
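+  # (Illustrative usage, not part of this change: once nginx is up,
+  # `curl -s http://127.0.0.1:9999/nginx_status` prints the stub_status
+  # counters - active connections plus the accepts/handled/requests totals.)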
+ server { + listen 127.0.0.1:9999; + location /nginx_status { + stub_status on; + access_log off; + allow 127.0.0.1; + deny all; + } + } + + upstream opscode_erchef { + {{~#if bind.oc_erchef}} + # oc_erchef + {{~#eachAlive bind.oc_erchef.members as |member|}} + server {{ member.sys.ip}}:{{member.cfg.port}}; + {{~/eachAlive}} + {{~/if}} + } + upstream oc_bifrost { + {{~#if bind.oc_bifrost}} + # oc_bifrost + {{~#eachAlive bind.oc_bifrost.members as |member|}} + server {{member.sys.ip}}:{{member.cfg.port}}; + {{~/eachAlive ~}} + {{~/if}} + } + upstream opscode_solr4 { + {{~#if bind.elasticsearch}} + {{~#eachAlive bind.elasticsearch.members as |member|}} + server {{member.sys.ip}}:{{member.cfg.http-port}}; + {{~/eachAlive}} + {{~/if}} + } + upstream bookshelf { + {{~#if bind.bookshelf}} + {{~#eachAlive bind.bookshelf.members as |member|}} + server {{member.sys.ip}}:{{member.cfg.port}}; + {{~/eachAlive}} + {{~/if}} + } + upstream oc_id { + {{#if bind.oc_id}} + {{#eachAlive bind.oc_id.members as |member|}} + server {{member.sys.ip}}:{{member.cfg.port}}; + {{/eachAlive}} + {{/if}} + } +{{#if bind.oc_erchef ~}} + {{#eachAlive bind.oc_erchef.members as |member| ~}} + {{#if @last ~}} + {{#if member.cfg.data_collector_enabled ~}} + upstream data-collector { + server {{member.cfg.data_collector_server}}:{{member.cfg.data_collector_port}}; + } + {{/if ~}} + {{/if ~}} + {{/eachAlive ~}} +{{/if ~}} + + # Include upstream definitions for addons + # TODO HABITAT plugins not yet + # include /var/opt/opscode/nginx/etc/addon.d/*_upstreams.conf; + + fastcgi_temp_path "{{pkg.svc_data_path}}/tmp/fastcgi"; + client_body_temp_path "{{pkg.svc_data_path}}/tmp/client_body"; + uwsgi_temp_path "{{pkg.svc_data_path}}/tmp/uwsgi"; + scgi_temp_path "{{pkg.svc_data_path}}/tmp/scgi"; + + # external lb config for Chef API + proxy_cache_path {{pkg.svc_data_path}}/cache/webui levels=1:2 keys_zone=webui-cache:50m max_size=5000m inactive=600m; + proxy_cache_path {{pkg.svc_data_path}}/cache/cookbooks levels=1:2 keys_zone=cookbooks:50m max_size=5000m inactive=600m; + proxy_temp_path {{pkg.svc_data_path}}/cache-tmp; + + # We support three options: serve nothing on non_ssl_port (80), + # redirect to https, or actually serve the API. + # server { + # listen 80; + # access_log {{pkg.svc_var_path}}/log/rewrite-port-80.log; + # return 301 https://$host$request_uri; + # } + + # Chef HTTPS API + include {{pkg.svc_config_path}}/chef_http_lb.conf; + include {{pkg.svc_config_path}}/chef_https_lb.conf; +} diff --git a/src/nginx/habitat/config/oc-id-external.conf b/src/nginx/habitat/config/oc-id-external.conf new file mode 100644 index 0000000000..6e33d1efd9 --- /dev/null +++ b/src/nginx/habitat/config/oc-id-external.conf @@ -0,0 +1,6 @@ +rewrite ^/id$ /id/ permanent; + +location ~ "^/id/.*$" { + proxy_redirect http://oc_id /; + proxy_pass http://oc_id; +} diff --git a/src/nginx/habitat/config/resolver.lua b/src/nginx/habitat/config/resolver.lua new file mode 100755 index 0000000000..39a8347edc --- /dev/null +++ b/src/nginx/habitat/config/resolver.lua @@ -0,0 +1,53 @@ +module("resolver", package.seeall) +local upstream_resolver = { } + +-- To preserve the ability to run tests locally, please comment out any +-- chef templating if statements so that the un-rendered portions run in tests. 
+-- For example: +-- + +-- These names map directly to named upstreams +upstream_resolver.chef = "opscode_chef" +upstream_resolver.erchef = "opscode_erchef" +upstream_resolver.acct = "opscode_account" + +-- Resolve an upstream, applying any necessary darklaunch controls. +-- Once we have an upstream name, modify it if the upstream +-- is tarpitted per the configuration. +upstream_resolver.resolve = function(route) + local upstream = nil + local route_id = route.route_id + local resolver_fun = upstream_resolver[route_id .. "_fun"] + if resolver_fun then + route_id = resolver_fun(route) + end + return upstream_resolver.tarpitify(route_id, route.org_config, route.internal) +end + +-- If config names a tarpit for the given route, then +-- alter the upstream name to the appropriate tarpit +upstream_resolver.tarpitify = function(route_id, org_config, internal) + local upstream = upstream_resolver[route_id] + -- internal non-API lbs don't tarpit + if internal then + return upstream + end + + tarpitid = org_config["tarpit_" .. route_id] + if tarpitid then + return upstream .. "_tarpit" .. tarpitid + end + return upstream +end + +upstream_resolver.acct_erchef_fun = function(route) + if route.org_config["couchdb_" .. route.endpoint] == 0 then + return "erchef" + end + return "acct" +end + +-- If this is an internal non API vhost , the caller must also ensure that route.internal == true +function resolver.determine_upstream(route) + return upstream_resolver.resolve(route) +end diff --git a/src/nginx/habitat/config/route_checks.lua b/src/nginx/habitat/config/route_checks.lua new file mode 100755 index 0000000000..85eb091681 --- /dev/null +++ b/src/nginx/habitat/config/route_checks.lua @@ -0,0 +1,88 @@ +module("route_checks", package.seeall) + +-- To preserve the ability to run tests locally, please comment out any +-- chef templating if statements so that the un-rendered portions run in tests. +-- For example: +-- + +local response = {} +response[403] = {} +response[404] = {} +response[503] = {} + +-- To add checks for a new response code, first declare it above in the form +-- response[CODE] = {}, then at minimum add a new function response[CODE].default = function(route) + + +-- To add route-specific checks, add a new function for the given +-- response code in the form of response[CODE].ROUTE_ID = function(route) +-- This function must return "true" if the request shoudl be terminated with "CODE". +-- +-- For example to implement "a 404 should occur for the users endpoint on the +-- "acct" route if we are configured to force users endpoint not found": +-- +-- response[404].acct = function(org_config, endpoint, object_name) +-- return endpoint == "users" and org_config['force_users_not_found'] == 1 +-- end +-- + +-- +-- Default checks -- applied first regardless of endpoint or object-name +-- are handled here. +-- +response[503].default = function(route) + -- Habitat chef server doesn't support 503 mode yet + if route.org_config["503_mode"] == 1 or + config.is_route_in_maint_mode(route.route_id) then + return true + end + return false +end + +response[404].default = function(route) + -- route-level darklaunch check: + -- Habitat chef server doesn't support 404 mode yet + if config.is_route_darklaunched(route.route_id) then + return not (route.org_config["dl_" .. 
route.route_id] == 1) + end + return false +end + +response[403].default = function(route) + -- Habitat chef server doesn't support blocked orgs yet + return route.org_config["org_blocked"] == 1 +end + +-- +-- Endpoint-specific checks from here on down. +-- + +-- return true if client is posting to "organizations" endpoint but +-- new org creation is disabled +response[503].acct = function(route) + -- Habitat chef server doesn't support 503 mode yet + return false + -- return route.org_name == nil and + -- route.endpoint == "organizations" and + -- ngx.req.get_method() == "POST" and + -- route.org_config["disable_new_orgs"] == 1 +end + +-- +-- Our only public interface +-- + +-- Run all available checks for the given parameters. Returns 0 +-- if it's clear to proceed, otherwise it returns an http response code +function route_checks.run(route) + for code, check in pairs(response) do + if check.default(route) or + (check[route.route_id] and + check[route.route_id](route)) then + return code + end + end + return 0 +end + + diff --git a/src/nginx/habitat/config/routes.lua b/src/nginx/habitat/config/routes.lua new file mode 100755 index 0000000000..2d89184eb5 --- /dev/null +++ b/src/nginx/habitat/config/routes.lua @@ -0,0 +1,271 @@ +module("routes", package.seeall) + +-- To preserve the ability to run tests locally, please comment out any +-- chef templating if statements so that the un-rendered portions run in tests. +-- For example: +-- + +-- For lpeg syntax, reference www.inf.puc-rio.br/~roberto/lpeg/ +local lpeg = require "lpeg" +local match = lpeg.match +local P = lpeg.P +local B = lpeg.B +local S = lpeg.S +local R = lpeg.R +local C = lpeg.C +local Ct = lpeg.Ct +local Cg = lpeg.Cg +local Cc = lpeg.Cc + +-- create a capture that maps the value of 'name' to capture group "route_id" +local function Crt(name) + return Cg(Cc(name), "route_id") +end + +-- create a capture that maps the captured value of 'exp' to capture group 'endpoint' +local function Cendpoint(exp) + return Cg(exp, "endpoint") +end + +-- match the first argument and set the endpoint to the second argument. +-- useful for when you want to set a route to an endpoint whose name +-- is not contained in the route. +-- for example: +-- Cmatch_and_assign_endpoint(p_users, "associations") * p_sep * p_eol +-- would match /users(/) and set its endpoint to "associations" +local function Cmatch_and_assign_endpoint(p_to_match, endpoint_string) + return p_to_match * Cendpoint(Cc(endpoint_string)) +end + +-- Basic identifiers +local p_eol = P(-1) +local p_lower = R"az" +local p_upper = R"AZ" +local p_numeric = R"09" +local p_special = S"_-" +local p_sep = P"/" +local p_dot = P"." 
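+
+-- A quick sketch of how these primitives compose (the pattern and test string
+-- below are illustrative only, not part of the routing grammar):
+--   local p_word = (p_lower + p_numeric)^1        -- one or more of [a-z0-9]
+--   match(Ct(Cg(p_word, "name")), "node42")       -- -> { name = "node42" }
+-- Named captures made with Cg() are collected into the table built by Ct(),
+-- which is exactly how route_id/org_name/endpoint/object_name are extracted
+-- from URIs further down.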
+ +-- Route Components +local p_org = P"organizations" +local p_org_base = P"/organizations" +local p_auth_user = P"authenticate_user" +local p_system_recovery = P"system_recovery" +local p_license = P"license" +local p_api_version = P"server_api_version" +local p_acl = P"/_acl" +local p_search = P"search" +local p_nodes = P"nodes" +local p_cookbooks = P"cookbooks" +local p_data = P"data" +local p_roles = P"roles" +local p_sandboxes = P"sandboxes" +local p_environments = P"environments" +local p_users = P"users" +local p_groups = P"groups" +local p_containers = P"containers" +local p_association_requests = P"association_requests" +local p_clients = P"clients" +local p_runs = P"runs" +local p_principals = P"principals" +local p_internal_organizations_base = P"/internal-organizations" +local p_controls = P"controls" +local p_keys = P"keys" +local p_policyfile = P"policies" +local p_policy_groups = P"policy_groups" +local p_cookbook_artifacts = P"cookbook_artifacts" +local p_universe = P"universe" + + +-- Composite patterns +local p_maybe_sep = p_sep^-1 +local p_trailing_sep = p_maybe_sep * p_eol +local p_org_identifier_char = p_lower + p_numeric + p_special +local p_org_identifier = p_org_identifier_char^1 +local p_until_next_separator = (P(1) - p_sep)^1 + +-- Because we implement different identifiers differently (users & orgs have one set of rules, +-- other objects have different rules) accept anything up to but not including the separator +-- as an identifier sufficient to pass along to the upstream. +local p_identifier = p_until_next_separator + +local p_org_prefix = p_org_base * p_sep +local p_named_org = p_org_prefix * Cg(p_org_identifier, "org_name") +local p_named_org_prefix = p_named_org * p_sep +local p_all_until_acl = (P(1) - p_acl)^1 +-- Not techncially for routing, this will be used to capture chef version from inbound headers +-- leaving it here to keep all of our grammar in one location. +local p_chef_version = Cg(p_numeric^1, "major") * p_dot * Cg(p_numeric^1, "minor") * p_dot + + +-- ROUTE CAPTURES +-- These captures will map the constant name to the capture group "route_id". +-- If you're adding a new route, the first step is to define the capture that returns +-- the route name below: + +local c_acct = Crt("acct") +local c_erchef = Crt("erchef") +local c_acct_erchef = Crt("acct_erchef") + +-- The "valid identifier" capture is used for any object name such +-- as node name, user name, etc. +local c_identifier = Cg(p_identifier, "object_name") + +local c_maybe_identifier = (p_sep * c_identifier)^-1 + +local p_erchef_endpoint = p_cookbooks + p_data + p_roles + p_sandboxes + + p_environments + p_clients + p_nodes + p_principals + + p_groups + p_containers + p_controls + + p_policy_groups + p_policyfile + p_cookbook_artifacts + p_universe + + +-- endpoints that map directly to erchef +-- If an object identifier is present - as identified with /IDENTIFIER - then +-- capture it. Otherwise -- require end-of-line (in other words, avoid the case +-- of "/nodesSOMEDATA" resolving to 'nodes' endpoint. +local p_erchef_direct = (Cendpoint(p_erchef_endpoint) * ((p_sep * c_identifier) + (p_maybe_sep * p_eol))) + + Cendpoint(p_search) + +-- users endpoint is currently split between erchef and account. 
+-- /users, /users/USERNAME/, /authenticate_user, and /system_recover -> erchef +-- /users/USERNAME/organizations, /users/USERNAME/association_requests go to account +local p_erchef_users = (p_sep * Cendpoint(p_users) * p_trailing_sep) + + (p_sep * Cendpoint(p_users) * (p_sep * c_identifier)^-1 * p_trailing_sep) + + (p_sep * Cendpoint(p_auth_user) * p_trailing_sep) + + (p_sep * Cendpoint(p_system_recovery) * p_trailing_sep) + +local p_keys_route = ((p_named_org_prefix * p_clients * p_sep * p_identifier * p_sep * Cendpoint(p_keys)) + + (p_named_org_prefix * p_users * p_sep * p_identifier * p_sep * Cendpoint(p_keys)) + + (p_sep * p_users * p_sep * p_identifier * p_sep * Cendpoint(p_keys))) * + ((p_sep * c_identifier)^-1 * p_trailing_sep) + + +local p_license_route = (p_sep * Cendpoint(p_license) * p_trailing_sep) +local p_server_api_version_route = (p_sep * Cendpoint(p_api_version) * p_trailing_sep) + +-- Everything that gets sent to erchef +local p_erchef = p_keys_route + + (p_named_org_prefix * p_erchef_direct) + + p_erchef_users + + p_license_route + + p_server_api_version_route + +-- erchef routing rules for chef_internal, which includes an additional +-- principals endpoint not exposed via the api rules +local p_erchef_int = p_erchef + + (p_named_org_prefix * Cendpoint(p_principals) * p_maybe_sep) + +-- /users endpoints for account: +local p_named_user = (p_sep * p_users * p_sep * c_identifier) + + + -- /organizations/:orgname/association_requests(/), custom endpoint for darklaunch +local p_acct_users = (p_named_org_prefix * Cendpoint(p_association_requests) * (p_sep + p_eol)) + + -- /organizations/:orgname/users/:username(/), endpoint "associations" for darklaunch + (p_named_org_prefix * Cmatch_and_assign_endpoint(p_users, "associations") * p_sep * c_identifier * (p_sep + p_eol)) + + -- /organizations/:orgname/users(/) + (p_named_org_prefix * Cmatch_and_assign_endpoint(p_users, "associations") * (p_sep + p_eol)) + + -- /users/:username/(/)association_requets(/) OR /users/:username/organiztaions(/) + p_named_user * + -- below rule for backwards compatibility with original: + -- /users/BLAH/{0,1}/association_requests + -- which permits: /users/BLAH//association_requests + ((p_sep * p_maybe_sep * Cendpoint(p_association_requests) * (p_sep + p_eol)) + + -- set /users/:username/organizations(/) to endpoint "associations" so we + -- can darklaunch it + (p_sep * Cmatch_and_assign_endpoint(p_org, "associations") * p_trailing_sep) ) + + +-- /organizations(/:orgname/) +local p_org_endpoint = (p_sep * Cendpoint(p_org) * (p_sep * Cg(p_org_identifier, "org_name"))^-1 * p_trailing_sep) + +-- note that the acl endpoint is a special case because it supercedes all others +-- including routes that would otherwise go to erchef. +local p_acl_endpoint = (p_named_org_prefix * p_all_until_acl * Cendpoint(p_acl) * (p_sep + p_eol)) + + (p_named_user * Cendpoint(p_acl) * (p_sep + p_eol)) + + +-- Default org endpoints. This is to help with migrating from OSC 11 to Chef Server 12 +local p_default_org_endpoints = p_search + p_nodes + p_cookbooks + p_data + p_roles + p_sandboxes + + p_environments + p_clients + p_runs + p_principals + p_groups + p_containers + +-- acct_erchef endpoints that are also routed to internal_acct +local p_internal_acct_erchef_endpoints = p_acct_users + p_org_endpoint + +local uri_resolvers = { + -- Retain ordering to ensure proper eval: + -- p_acl_endpont must come first because a trailing _acl takes precedence + -- over any other identifiers which may be in the url. 
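+    -- Concretely (hypothetical URI): /organizations/acme/nodes/web1/_acl must
+    -- resolve to acct_erchef via p_acl_endpoint, even though the same URI
+    -- without the trailing /_acl matches p_erchef and would route to erchef.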
+ + api = (p_acl_endpoint * c_acct_erchef) + + (p_erchef * c_erchef) + + (p_internal_acct_erchef_endpoints * c_acct_erchef), + + -- This one is easy - everything passes through, though we'll still need to capture components + -- (org name, object name, endpoint name) where we can, so that post-route hooks can be applied. + internal_acct = + -- as in API, acls come first so that we can ensure webui requests for + -- acls get routed correctly to account, even when the underlying object is + -- darklaunched to erchef. + (p_acl_endpoint * c_acct_erchef) + + + -- TODO confirm - this should no longer be required. + -- Special case: webui1 will still send requests for clients over to us, so + -- we have to be able to route migrated endpoints (that used to be acct) to erchef. + -- we will need to continue to add migrated acct->erchef endpoints here + -- until we either correct or retire webui 1 + ((p_named_org_prefix * Cendpoint(p_clients) * c_maybe_identifier) * c_erchef) + + (p_erchef_users * c_erchef) + + (p_internal_acct_erchef_endpoints * c_acct_erchef) + + (p_internal_organizations_base * c_acct), + + -- Anything that routes to erchef or reporting is handled here. + internal_chef = (p_erchef_int * c_erchef) +} + +-- Checks if the request is for default orgs +local function needs_default_org(uri) + return match(p_sep * p_default_org_endpoints, uri) +end + +-- caller should bypass this client version check if caller is internal lb +function routes.is_client_version_valid(version, min) + local res = match(Ct(p_chef_version), version) + if (res == nil) then + return false + end + local major = tonumber(res.major) + local minor = tonumber(res.minor) + if (major or minor) and + -- Look at the provided arguments, but 'special case' from before we + -- changed our version string format. ONce we desupport that version, + -- we will need to make a final update here. + (major >= min or (major == 0 and minor == 10)) then + return true + end + return false +end + +-- Return a table containing: +-- route_id (route id, nil if invalid route) +-- org_name (nil if not found) +-- endpoint (nil if not found) +-- object_name (nil if not found) +function routes.resolve_uri(mode, uri) + if config.default_org and needs_default_org(uri) then + uri = '/organizations/' .. config.default_org .. uri + -- Deliberately do not rewrite uri here, and pass the + -- original to erchef. Client requests are checksummed + end + route = match(Ct(uri_resolvers[mode]), uri) + if not route then + route = {} + end + + -- normalize /_acl endpoint to acls + if route.endpoint == "/_acl" then + route.endpoint = "acls" + end + + return route +end diff --git a/src/nginx/habitat/config/validator.lua b/src/nginx/habitat/config/validator.lua new file mode 100755 index 0000000000..41c7e1ec21 --- /dev/null +++ b/src/nginx/habitat/config/validator.lua @@ -0,0 +1,36 @@ +module("validator", package.seeall) + +-- Map to translate HTTP verbs to ngx.HTTP_* numericals +local HTTP_REQ_MAP = { + GET = ngx.HTTP_GET, + POST = ngx.HTTP_POST, +} + +-- Validate the request against the `/validate` endpoint. +-- method: The allowed HTTP verb as a string. e.g: "GET", "POST" +function validator.validate(method) + if ngx.var.request_method ~= method then + ngx.exit(ngx.HTTP_NOT_ALLOWED) + end + + -- ngx.req.read_body() is required when using ngx.location.capture() + ngx.req.read_body() + + local res = ngx.location.capture( + "/organizations/" .. ngx.var.request_org .. "/validate" .. 
ngx.var.request_uri,
+    {
+      method = HTTP_REQ_MAP[ngx.req.get_method()],
+      always_forward_body = true,
+      copy_all_vars = true
+    }
+  )
+
+  if res.status == ngx.HTTP_OK then
+    return
+  else
+    -- return the validation sub-request error message
+    ngx.status = res.status
+    ngx.say(res.body)
+    ngx.exit(res.status)
+  end
+end
diff --git a/src/nginx/habitat/config/version-manifest.txt b/src/nginx/habitat/config/version-manifest.txt
new file mode 100644
index 0000000000..406f6b4f0a
--- /dev/null
+++ b/src/nginx/habitat/config/version-manifest.txt
@@ -0,0 +1,3 @@
+Package: {{pkg.ident}}
+Habitat: {{sys.version}}
+Member: {{sys.member_id}} / {{sys.hostname}}
diff --git a/src/nginx/habitat/default.toml b/src/nginx/habitat/default.toml
new file mode 100644
index 0000000000..5b0cd6d060
--- /dev/null
+++ b/src/nginx/habitat/default.toml
@@ -0,0 +1,37 @@
+# The maximum accepted body size for a client request, as indicated by the
+# Content-Length request header. A request whose body exceeds this value is
+# rejected with a 413 Request Entity Too Large error.
+client_max_body_size = "250m"
+
+# The fully qualified domain name for the server. This should be set to the
+# address at which people will be accessing the server.
+server_name = "chef-server"
+
+# # SSL protocols and ciphers
+
+# These options provide the current best security with TLSv1
+# ssl_protocols = "-ALL +TLSv1"
+# ssl_ciphers = "RC4:!MD5"
+
+# This might be necessary for auditors that want no MEDIUM security ciphers and
+# don't understand BEAST attacks
+# ssl_protocols = "-ALL +SSLv3 +TLSv1"
+# ssl_ciphers = "HIGH:!MEDIUM:!LOW:!ADH:!kEDH:!aNULL:!eNULL:!EXP:!SSLv2:!SEED:!CAMELLIA:!PSK"
+
+# Based off of the Mozilla recommended cipher suite
+# https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=nginx-1.8.1&openssl=1.0.1u&hsts=no&profile=modern
+# NOTE: testssl.sh warns about Secure Renegotiation (CVE-2009-3555),
+# but this might be ok since it should only allow for renegotiating from one
+# of the allowed ciphers to another one.
+# NOTE: AES256-GCM-SHA384 is not part of the Mozilla suite but has been added to
+# support AWS's classic ELB's. Without it the health checks will fail.
+ssl_protocols = "TLSv1.2"
+ssl_ciphers = "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:AES256-GCM-SHA384:!aNULL:!eNULL:!EXPORT"
+
+# Ports used for SSL traffic; used to render partials for chef_http[s]_lb_conf
+[http]
+port = 8080
+is_ssl = false
+[https]
+port = 8443
+is_ssl = true
diff --git a/src/nginx/habitat/hooks/health_check b/src/nginx/habitat/hooks/health_check
new file mode 100644
index 0000000000..9c589ec58a
--- /dev/null
+++ b/src/nginx/habitat/hooks/health_check
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+
+# Health Check for chef-server-nginx
+
+# default return code is 0
+rc=0
+
+{{pkgPathFor "core/curl"}}/bin/curl -k -X GET -sS --fail --max-time 2 https://localhost:{{cfg.https.port}}/_status
+
+case $? in
+  # Zero exit status means curl got back a 200 and everything is ok.
+  0)
+    rc=0 ;;
+  # Anything else is critical.
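+  # (For instance, curl exits 7 when the connection is refused and 28 when
+  # the --max-time 2 deadline passes; both fall through to critical below.)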
+ *) + rc=2 ;; +esac + +exit $rc diff --git a/src/nginx/habitat/hooks/init b/src/nginx/habitat/hooks/init new file mode 100644 index 0000000000..8f9a94e77e --- /dev/null +++ b/src/nginx/habitat/hooks/init @@ -0,0 +1,39 @@ +#!/bin/bash + +exec 2>&1 + +for dir in log logs tmp; do + mkdir -pv "{{pkg.svc_var_path}}/$dir" +done + +for dir in ca cache cache-tmp tmp; do + mkdir -pv "{{pkg.svc_data_path}}/$dir" +done + +chown -R hab:hab "{{pkg.svc_config_path}}" "{{pkg.svc_data_path}}" \ + "{{pkg.svc_var_path}}" + +# Create SSL certs + +# This file is for openssl to put random bits into when doing key generation. +export RANDFILE="{{pkg.svc_data_path}}/.rnd" +touch $RANDFILE + +dhparam_file="{{pkg.svc_data_path}}/ca/dhparams.pem" +if [[ ! -f "$dhparam_file" ]]; then + openssl dhparam -dsaparam -out $dhparam_file 2048 +fi + +# Generate a private key if one does not exist. +cert_file="{{pkg.svc_data_path}}/ca/{{cfg.server_name}}.cert" +key_file="{{pkg.svc_data_path}}/ca/{{cfg.server_name}}.key" +if [[ ! -f "$cert_file" ]]; then + openssl req \ + -newkey rsa:2048 -nodes -keyout "$key_file" \ + -x509 -days 3650 -out "$cert_file" \ + -subj "/C=US/O=Chef Software/OU=Chef Server/CN=#{{cfg.server_name}}" + chmod 600 "$cert_file" "$key_file" +fi + +# quick and dirty hack to see the lpeg lua library +#ln -s {{pkgPathFor "irvingpop/openresty-lpeg"}}/lpeg.so {{pkgPathFor "core/openresty"}}/luajit/lib/lua/5.1/ diff --git a/src/nginx/habitat/hooks/reconfigure b/src/nginx/habitat/hooks/reconfigure new file mode 100644 index 0000000000..c8b477c3ee --- /dev/null +++ b/src/nginx/habitat/hooks/reconfigure @@ -0,0 +1,10 @@ +#!/bin/bash + +# Nginx starts as root and drops priveleges to hab, so these files need to be +# readable by hab at runtime and not just on startup. During a reconfigure +# permissions are reset, so we need to change them back in the reconfigure +# hook. +# +# See https://github.com/habitat-sh/habitat/issues/1547 +chown -R hab:hab "{{pkg.svc_config_path}}" "{{pkg.svc_data_path}}" \ + "{{pkg.svc_var_path}}" diff --git a/src/nginx/habitat/plan.sh b/src/nginx/habitat/plan.sh new file mode 100644 index 0000000000..b22c2dd631 --- /dev/null +++ b/src/nginx/habitat/plan.sh @@ -0,0 +1,77 @@ +pkg_name=chef-server-nginx +pkg_origin=chef-server +pkg_maintainer="The Chef Server Maintainers " +pkg_license=('Apache-2.0') +pkg_deps=( + core/curl + core/libossp-uuid + chef-server/openresty-noroot +) +pkg_build_deps=() +pkg_lib_dirs=(lib) +pkg_include_dirs=(include) +pkg_bin_dirs=(bin) +pkg_exposes=(port ssl-port) +pkg_exports=( + [port]=http.port + [ssl-port]=https.port +) +pkg_binds_optional=( + [bookshelf]="port" + [chef-server-ctl]="secrets" + [oc_erchef]="port data_collector_enabled data_collector_server data_collector_port" + [oc_bifrost]="port" + [elasticsearch]="http-port" + [oc_id]="port" +) +pkg_description="NGINX configuration and content for Chef Server" +pkg_upstream_url="https://docs.chef.io/server_components.html" +pkg_svc_run="openresty -c ${pkg_svc_config_path}/nginx.conf -p ${pkg_svc_var_path}" + +pkg_version() { + cat "$PLAN_CONTEXT/../../../VERSION" +} + +do_before() { + do_default_before + if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then + exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root." 
56 + fi + update_pkg_version +} + +do_download() { + return 0 +} + +do_unpack() { + mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname" + cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname" + mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname/static" + cp -R "$PLAN_CONTEXT/../../../omnibus/files/private-chef-cookbooks/private-chef/files/default/html" "$HAB_CACHE_SRC_PATH/$pkg_dirname/static" + printenv + echo $PLAN_CONTEXT $HAB_CACHE_SRC_PATH +# attach +} + +do_build() { + return 0 +} + +do_install() { + echo PKG_SVC_STATIC_PATH $pkg_svc_static_path + mkdir -p $pkg_svc_static_path + cp -R $HAB_CACHE_SRC_PATH/$pkg_dirname/static "$pkg_prefix" +# attach + return 0 +} + +do_strip() { + return 0 +} + + +## NOT RIGHT +do_after() { + return 0 +} diff --git a/src/oc-id/.gitignore b/src/oc-id/.gitignore index df18827e83..8e59e0cc86 100644 --- a/src/oc-id/.gitignore +++ b/src/oc-id/.gitignore @@ -30,3 +30,4 @@ vendor/bundle/ public/id/ .ruby-version +results/ diff --git a/src/oc-id/habitat/config/database-migrations.sh b/src/oc-id/habitat/config/database-migrations.sh new file mode 100644 index 0000000000..882c414b9d --- /dev/null +++ b/src/oc-id/habitat/config/database-migrations.sh @@ -0,0 +1,36 @@ +#!/bin/bash -e + +# TODO: not sure how to handle this. Sqitch bombs when it can't find the timezone +export TZ="UTC" + +export PERL5LIB="$(cat "$(hab pkg path "chef-server/dbdpg")/LD_RUN_PATH")" +export PATH=$PATH:$(hab pkg path "core/postgresql")/bin:$(hab pkg path "core/sqitch")/bin + +{{#if bind.database}} + {{#eachAlive bind.database.members as |member|}} + {{#if @last}} +HOST="{{member.sys.ip}}" +PORT="{{member.cfg.port}}" +USER="{{member.cfg.superuser_name}}" +PASS="{{member.cfg.superuser_password}}" + {{/if}} + {{/eachAlive}} +{{else}} +HOST="{{cfg.postgresql.vip}}" +PORT="{{cfg.postgresql.port}}" +USER="{{cfg.sql_user}}" +PASS="{{cfg.sql_password}}" +{{/if}} +DB="{{cfg.db.name}}" + +PG_ARGS="--host "$HOST" --port "$PORT" --username "$USER"" +export PGPASSWORD="$PASS" + +# Wait until postgres is ready +until pg_isready $PG_ARGS --quiet; do :; done + +# Create delivery db for sqitch to deploy to +createdb $PG_ARGS $DB "{{cfg.db.user}}" + +# Install uuid-ossp extension +psql $PG_ARGS --command 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"' $DB diff --git a/src/oc-id/habitat/config/database.yml b/src/oc-id/habitat/config/database.yml new file mode 100644 index 0000000000..436ff51a21 --- /dev/null +++ b/src/oc-id/habitat/config/database.yml @@ -0,0 +1,21 @@ +production: + adapter: postgresql + encoding: unicode + pool: 20 + template: template1 +{{#if bind.database}} + {{#eachAlive bind.database.members as |member|}} + {{#if @last}} + host: {{member.sys.ip}} + port: {{member.cfg.port}} + username: {{member.cfg.superuser_name}} + password: {{member.cfg.superuser_password}} + {{/if}} + {{/eachAlive}} +{{else}} + host: {{cfg.db.host}} + port: {{cfg.db.port}} + password: {{cfg.db.password}} + username: {{cfg.db.user}} +{{/if}} + database: {{cfg.db.name}} diff --git a/src/oc-id/habitat/config/production.yml b/src/oc-id/habitat/config/production.yml new file mode 100644 index 0000000000..3638a966de --- /dev/null +++ b/src/oc-id/habitat/config/production.yml @@ -0,0 +1,11 @@ +--- +chef: + endpoint: https://172.18.0.1 + superuser: pivotal + ssl_verify_mode: verify_none +doorkeeper: + administrators: [] +sentry_dsn: +sign_up_url: https:///signup +email_from_address: '"Opscode" ' +origin: diff --git a/src/oc-id/habitat/config/secret_key_base.sh b/src/oc-id/habitat/config/secret_key_base.sh new file mode 100644 index 
0000000000..110aaeefff --- /dev/null +++ b/src/oc-id/habitat/config/secret_key_base.sh @@ -0,0 +1,9 @@ +{{#if bind.chef-server-ctl}} + {{~ #eachAlive bind.chef-server-ctl.members as |member|}} + {{~ #if @last}} +export SECRET_KEY_BASE="{{member.cfg.secrets.oc_id.secret_key_base}}" + {{~ /if}} + {{~ /eachAlive}} +{{else}} +export SECRET_KEY_BASE="{{cfg.secret_key_base}}" +{{/if}} diff --git a/src/oc-id/habitat/config/secret_token.rb b/src/oc-id/habitat/config/secret_token.rb new file mode 100644 index 0000000000..404754220b --- /dev/null +++ b/src/oc-id/habitat/config/secret_token.rb @@ -0,0 +1,9 @@ +{{#if bind.chef-server-ctl}} + {{~ #eachAlive bind.chef-server-ctl.members as |member|}} + {{~ #if @last}} +OcId::Application.config.secret_key_base = "{{member.cfg.secrets.oc_id.secret_key_base}}" + {{~ /if}} + {{~ /eachAlive}} +{{else}} +OcId::Application.config.secret_key_base = "{{cfg.secret_key_base}}" +{{/if}} diff --git a/src/oc-id/habitat/config/veil-secrets.json b/src/oc-id/habitat/config/veil-secrets.json new file mode 100644 index 0000000000..cc9ade9211 --- /dev/null +++ b/src/oc-id/habitat/config/veil-secrets.json @@ -0,0 +1,7 @@ +{{~ #if bind.chef-server-ctl}} + {{~ #eachAlive bind.chef-server-ctl.members as |member|}} + {{~ #if @last}} +{{toJson member.cfg.secrets}} + {{~ /if}} + {{~ /eachAlive}} +{{~ /if}} diff --git a/src/oc-id/habitat/default.toml b/src/oc-id/habitat/default.toml new file mode 100644 index 0000000000..19de282f06 --- /dev/null +++ b/src/oc-id/habitat/default.toml @@ -0,0 +1,10 @@ +port=9090 +secret_key_base="_will_get_updated_dynamically_" +rails_env="production" + +[db] +host="localhost" +port=5432 +user="oc_id" +password="" +name="oc_id" diff --git a/src/oc-id/habitat/hooks/health_check b/src/oc-id/habitat/hooks/health_check new file mode 100644 index 0000000000..dfb858894b --- /dev/null +++ b/src/oc-id/habitat/hooks/health_check @@ -0,0 +1,20 @@ +#!/bin/sh +# + +# Health Check for oc_id + +# default return code is 0 +rc=0 + +{{pkgPathFor "core/curl"}}/bin/curl -X GET -sS --fail --max-time 2 http://localhost:9090/id/signin -I | head -1 + +case $? in + # Zero exit status means curl got back a 200 end everything is ok. + 0) + rc=0 ;; + # Anything else is critical. + *) + rc=2 ;; +esac + +exit $rc diff --git a/src/oc-id/habitat/hooks/init b/src/oc-id/habitat/hooks/init new file mode 100644 index 0000000000..5f4503b0ff --- /dev/null +++ b/src/oc-id/habitat/hooks/init @@ -0,0 +1,22 @@ +#!/bin/bash + +exec 2>&1 + +source "{{pkg.svc_config_path}}/database-migrations.sh" + +SRC="{{pkg.path}}/oc_id" +DEST={{pkg.svc_files_path}} + +# This rsync is because the rails app needs to access the templated `config` dir +# as a subdirectory of its install location. Symlinking `config` from pkg.path +# doesn't work because that requires root to write in pkg.path. +# Placing the core oc_id rails app in pkg.svc_files_path allows for a non root +# container. + +cd $DEST +ln -sf {{pkg.svc_config_path}} config +{{pkgPathFor "core/rsync"}}/bin/rsync -a $SRC/ . 
\ + --keep-dirlinks \ + --exclude vendor/ \ + --exclude database.yml \ + --exclude production.yml diff --git a/src/oc-id/habitat/hooks/run b/src/oc-id/habitat/hooks/run new file mode 100644 index 0000000000..dd665dc570 --- /dev/null +++ b/src/oc-id/habitat/hooks/run @@ -0,0 +1,19 @@ +#!/bin/bash + +exec 2>&1 + +export RAILS_ENV={{cfg.rails_env}} +export "HOME={{pkg.svc_files_path}}" +export LD_RUN_PATH=$(hab pkg path "core/gcc-libs")/lib:$LD_RUN_PATH +export VERSION=`ls -1 {{pkg.svc_files_path}}/db/migrate | tail -n 1 | $(hab pkg path "core/sed")/bin/sed -e "s/_.*//g"` +export LD_LIBRARY_PATH=$LD_RUN_PATH +export PATH=$HOME/bin:$PATH +export CHEF_SECRETS_DATA=$(cat {{pkg.svc_config_path}}/veil-secrets.json) + +cd $HOME +echo "DB VERSION: $VERSION" +# Habitat Todo +# move assets compile into the build phase +$(hab pkg path "core/bundler")/bin/bundle exec bin/rake assets:precompile +$(hab pkg path "core/bundler")/bin/bundle exec bin/rake db:migrate +$(hab pkg path "core/bundler")/bin/bundle exec bin/rails server -p {{cfg.port}} -b 0.0.0.0 diff --git a/src/oc-id/habitat/plan.sh b/src/oc-id/habitat/plan.sh new file mode 100644 index 0000000000..e16369ba27 --- /dev/null +++ b/src/oc-id/habitat/plan.sh @@ -0,0 +1,107 @@ +pkg_name=oc_id +pkg_origin=chef-server +pkg_maintainer="The Chef Server Maintainers " +pkg_license=('Apache-2.0') +pkg_deps=( + core/sqitch + core/curl + core/node + core/bundler + core/ruby + core/rsync + core/sed + core/libffi + core/sqlite + chef-server/dbdpg + core/tzdata +) +pkg_build_deps=( + core/git + core/make + core/gcc + core/tar + core/pkg-config + core/coreutils +) +pkg_binds_optional=( + [database]="port" + [chef-server-ctl]="secrets" +) +pkg_exports=( + [port]="port" +) +pkg_exposes=(port) +pkg_bin_dirs=(binstubs) + +pkg_version() { + cat "$PLAN_CONTEXT/../../../VERSION" +} + +do_before() { + do_default_before + if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then + exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root." 56 + fi + update_pkg_version +} + +do_unpack() { + # Copy everything over to the cache path so we don't write out our compiled + # deps into the working directory, but into the cache directory. 
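+  # (For illustration, with a hypothetical $pkg_dirname of oc_id-12.17.0 the
+  # sources land in $HAB_CACHE_SRC_PATH/oc_id-12.17.0 instead of dirtying
+  # $PLAN_CONTEXT.)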
+ mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname" + cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname" +} + +do_prepare() { + # clean up any lingering bundle artifacts + rm -rf $PLAN_CONTEXT/../.bundle +} + +do_build() { + export LD_LIBRARY_PATH="$(pkg_path_for core/libffi)/lib:$(pkg_path_for core/sqlite)/lib" + export USE_SYSTEM_LIBFFI=1 + export C_INCLUDE_PATH="$(pkg_path_for core/sqlite)/include" + export BUNDLE_SILENCE_ROOT_WARNING=1 +} + +_tar_pipe_app_cp_to() { + local dst_path tar + dst_path="$1" + tar="$(pkg_path_for tar)/bin/tar" + + "$tar" -cp \ + --owner=root:0 \ + --group=root:0 \ + --no-xattrs \ + --exclude-backups \ + --exclude-vcs \ + --exclude='habitat' \ + --exclude='vendor/bundle' \ + --exclude='results' \ + --files-from=- \ + -f - \ + | "$tar" -x \ + -C "$dst_path" \ + -f - +} + +do_install() { + _bundler_dir="$(pkg_path_for bundler)" + export HOME="${pkg_prefix}/oc_id" + mkdir $HOME + { git ls-files; git ls-files --exclude-standard --others; } \ + | _tar_pipe_app_cp_to "$HOME" + bundle config path ${HOME}/vendor/bundle + bundle config build.sqlite3 --with-sqlite3-lib=$(pkg_path_for core/sqlite)/lib + bundle install --path "${HOME}/vendor/bundle" --binstubs="${HOME}/bin" --shebang ruby --deployment + # fix tzdata location + echo "Adding core/tzdata zoneinfo search path to tzinfo gem" + grep -l DEFAULT_SEARCH_PATH $HOME/vendor/bundle/ruby/*/gems/tzinfo*/lib/tzinfo/zoneinfo_data_source.rb | while read -r f; do + sed -e "s,/etc/zoneinfo,$(pkg_path_for core/tzdata)/share/zoneinfo,g" -i "$f" + done +} + +# needed due to libffi Bad value error +do_strip() { + return 0 +} diff --git a/src/oc_bifrost/habitat/config/database-migrations.sh b/src/oc_bifrost/habitat/config/database-migrations.sh new file mode 100644 index 0000000000..87545544bf --- /dev/null +++ b/src/oc_bifrost/habitat/config/database-migrations.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e + +# TODO: not sure how to handle this. 
Sqitch bombs when it can't find the timezone +export TZ="UTC" + +export PERL5LIB="$(cat "$(hab pkg path "chef-server/dbdpg")/LD_RUN_PATH")" +export PATH=$PATH:$(hab pkg path "core/postgresql")/bin:$(hab pkg path "core/sqitch")/bin + +{{#if bind.database}} + {{#eachAlive bind.database.members as |member|}} + {{#if @last}} +HOST="{{member.sys.ip}}" +PORT="{{member.cfg.port}}" +USER="{{member.cfg.superuser_name}}" +PASS="{{member.cfg.superuser_password}}" +DB="bifrost" + {{/if}} + {{/eachAlive}} +{{else}} +HOST="{{cfg.postgresql.vip}}" +PORT="{{cfg.postgresql.port}}" +USER="{{cfg.sql_user}}" +PASS="{{cfg.sql_password}}" +DB="bifrost" +{{/if}} + +PG_ARGS="--host "$HOST" --port "$PORT" --username "$USER"" +export PGPASSWORD="$PASS" + +# Wait until postgres is ready +until pg_isready $PG_ARGS --quiet; do :; done + +# Create delivery db for sqitch to deploy to +createdb $PG_ARGS $DB "oc_bifrost" + +# Install uuid-ossp extension +psql $PG_ARGS --command 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"' $DB + +cd "{{pkg.path}}/schema" || exit +sqitch --quiet --engine pg deploy "db:pg://${USER}:${PASS}@${HOST}/$DB" diff --git a/src/oc_bifrost/habitat/config/sys.config b/src/oc_bifrost/habitat/config/sys.config new file mode 100644 index 0000000000..60a8ab311f --- /dev/null +++ b/src/oc_bifrost/habitat/config/sys.config @@ -0,0 +1,144 @@ +%% -*- mode: erlang -*- +%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*- %% ex: ts=4 sw=4 ft=erlang et +%% +%% oc_bifrost sys.config file +%% +%% Automatically generated by Chef +%% +[ + {kernel, [{start_pg2, true}, + {inet_dist_use_interface, {127,0,0,1}} + ]}, + {sasl, [ + {sasl_error_logger, {file, "{{pkg.svc_var_path}}/logs/sasl-error.log"}}, + {errlog_type, error}, + {error_logger_mf_dir, "{{pkg.svc_var_path}}/logs"}, % Log directory + {error_logger_mf_maxbytes, 104857600}, + {error_logger_mf_maxfiles, 10} + ]}, + {chef_secrets, [ + {provider, chef_secrets_env} + ]}, + {lager, [ + %% What handlers to install with what arguments + %% The defaults for the logfiles are to rotate the files when + %% they reach 10Mb or at midnight, whichever comes first, and keep + %% the last 5 rotations. See the lager README for a description of + %% the time rotation format: + %% https://github.com/basho/lager/blob/master/README.md#internal-log-rotation + %% + %% If you wish to disable rotation, you can either set the size to 0 + %% and the rotation time to "", or instead specify a 2-tuple that only + %% consists of {Logfile, Level}. + {handlers, [ + {lager_console_backend, [{{cfg.lager.lager_console_backend}}, {lager_default_formatter, [ "[", severity, "] ", message, "\n"]}]}, + {lager_file_backend, [ + {file, "{{pkg.svc_var_path}}/logs/error.log"}, + {level, error}, + {size, 104857600}, + {date, "$D0"}, + {count, 10} + ]}, + {lager_file_backend, [ + {file, "{{pkg.svc_var_path}}/logs/console.log"}, + {level, info}, + {size, 104857600}, + {date, "$D0"}, + {count, 10}, + %% Removes the logging source, since currently + %% there's only one place in all of Bifrost that + %% we log from. Also removes the PID from + %% the default message format. + {formatter_config, [date, " ", time, " [", severity, "] ", message, "\n"]} + ]} + ]}, + + %% Whether to write a crash log, and where. + %% Commented/omitted/undefined means no crash logger. 
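+  %% (Illustrative only: to disable the crash log entirely you could replace
+  %% the tuple below with {crash_log, undefined}.)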
+ {crash_log, "{{pkg.svc_var_path}}/logs/crash.log"}, + + %% Maximum size in bytes of events in the crash log - defaults to 65536 + {crash_log_msg_size, 65536}, + + %% Maximum size of the crash log in bytes, before its rotated, set + %% to 0 to disable rotation - default is 0 + {crash_log_size, 104857600}, + + %% What time to rotate the crash log - default is no time + %% rotation. See the lager README for a description of this format: + %% https://github.com/basho/lager/blob/master/README.org + {crash_log_date, "$D0"}, + + %% Number of rotated crash logs to keep, 0 means keep only the + %% current one - default is 0 + {crash_log_count, 10}, + + %% Whether to redirect error_logger messages into lager - defaults to true + {error_logger_redirect, true}, + + %% Bump up the "high-water mark" (default 50), which is the + %% number of messages per second allowed to come from + %% error_logger. This is the same as used by + %% opscode-chef-mover, FWIW. + {error_logger_hwm, 1000} + ]}, + {webmachine, [ + {log_handlers, [ + {oc_wm_request_logger, [ + {file, "{{pkg.svc_var_path}}/logs/crash.log"}, + {file_size, 100}, %% Size in MB + {files, 10}, + {annotations, [requestor_id, created_authz_id, perf_stats, msg]} + ] + }]}] + }, + {sqerl, [ + {db_driver_mod, sqerl_pgsql_client}, + {ip_mode, [ {{cfg.private_chef.ip_mode}} ] }, +{{#if bind.database ~}} + {{#eachAlive bind.database.members as |member| ~}} + {{~#if @last }} + {db_host, "{{member.sys.ip}}"}, + {db_port, {{member.cfg.port}} }, + {db_user, "{{member.cfg.superuser_name}}"}, + {db_pass, "{{member.cfg.superuser_password}}"}, + {{/if ~}} + {{/eachAlive ~}} +{{~else}} + {db_host, "{{cfg.sqerl.db_host}}"}, + {db_port, {{cfg.sqerl.db_port}} }, + {db_user, "{{cfg.sqerl.db_user}}"}, + {db_pass, "{{cfg.sqerl.db_pass}}"}, +{{/if ~}} + {db_name, "{{cfg.sqerl.db_name}}" }, + {idle_check, 10000}, + {pooler_timeout, {{cfg.sqerl.db_pooler_timeout}} }, + {db_timeout, {{cfg.sqerl.sql_db_timeout}} }, + {prepared_statements, {bifrost_db, statements, []} }, + {column_transforms, []} + ]}, + {stats_hero, [ + {udp_socket_pool_size, 20 }, + {protocol, estatsd}, + {estatsd_host, "127.0.0.1" }, + {estatsd_port, 9466 } + ]}, + {pooler, [ + {pools, [ +{{~#with cfg/pooler/sqerl }} + [{name, sqerl}, + {max_count, {{max_count}} }, + {init_count, {{init_count}} }, + {queue_max, {{queue_max}} }, + {start_mfa, {sqerl_client, start_link, []}}] +{{~/with ~}} + ]} + %%,{metrics_module, folsom_metrics} + ]}, + {bifrost, [ + {ip, "0.0.0.0" }, + {port, {{cfg.port}} }, + {root_metric_key, "bifrost"}, + {enable_extended_perf_log, true} + ]} +]. diff --git a/src/oc_bifrost/habitat/config/veil-secrets.json b/src/oc_bifrost/habitat/config/veil-secrets.json new file mode 100644 index 0000000000..cc9ade9211 --- /dev/null +++ b/src/oc_bifrost/habitat/config/veil-secrets.json @@ -0,0 +1,7 @@ +{{~ #if bind.chef-server-ctl}} + {{~ #eachAlive bind.chef-server-ctl.members as |member|}} + {{~ #if @last}} +{{toJson member.cfg.secrets}} + {{~ /if}} + {{~ /eachAlive}} +{{~ /if}} diff --git a/src/oc_bifrost/habitat/config/vm.args b/src/oc_bifrost/habitat/config/vm.args new file mode 100644 index 0000000000..9ee6e9d72f --- /dev/null +++ b/src/oc_bifrost/habitat/config/vm.args @@ -0,0 +1,40 @@ +## Name of the node +-name bifrost@127.0.0.1 + +## Cookie for distributed erlang +-setcookie oc_bifrost + +## Enable SMP even on single-core machines +## The Erchef keygen cache feature uses +## enif_send, which does not work on single- +## core machines unless SMP is enabled. 
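+## (Illustrative sanity check: once the VM is up, erlang:system_info(smp_support)
+## should return true with this flag in place.)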
+-smp enable + +## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive +## (Disabled by default..use with caution!) +##-heart + +## Enable kernel poll and a few async threads ++K true ++A 10 + ++P 262144 + +## Increase number of concurrent ports/sockets +-env ERL_MAX_PORTS 65536 + +## Add directory for hot patches to the front of the code path +-pa lib/patches + +## Tweak GC to run more often +-env ERL_FULLSWEEP_AFTER 10 + +## Increase logfile size to 10M +-env RUN_ERL_LOG_MAXSIZE 10000000 + +{{#if cfg.fips_enabled ~}} +## Runtime switches to enable loading custom crypto module +## that supports OpenSSL-FIPS +-env ERLANG_CRYPTO2_PATH {{pkg.path}}/lib/erlang-crypto2/priv +-pa {{pkg.path}}/lib/erlang-crypto2/ebin +{{/if ~}} diff --git a/src/oc_bifrost/habitat/default.toml b/src/oc_bifrost/habitat/default.toml new file mode 100644 index 0000000000..75b9ff3c4d --- /dev/null +++ b/src/oc_bifrost/habitat/default.toml @@ -0,0 +1,26 @@ +port=9463 + +[private_chef] +ip_mode="ipv4" +fips_enabled=false + +[bifrost] + +[lager] +lager_console_backend="info" + +[sqerl] +db_host="localhost" +db_port=5432 +db_user="bifrost" +db_pass="chefrocks" +db_name="bifrost" +db_pooler_timeout=2000 +sql_db_timeout=5000 + +[webmachine] + +[pooler.sqerl] +max_count=20 +init_count=20 +queue_max=200 diff --git a/src/oc_bifrost/habitat/hooks/health_check b/src/oc_bifrost/habitat/hooks/health_check new file mode 100644 index 0000000000..31b46eadf7 --- /dev/null +++ b/src/oc_bifrost/habitat/hooks/health_check @@ -0,0 +1,20 @@ +#!/bin/sh +# + +# Health Check for oc_bifrost + +# default return code is 0 +rc=0 + +{{pkgPathFor "core/curl"}}/bin/curl -X GET -sS --fail --max-time 2 http://localhost:9463/_status + +case $? in + # Zero exit status means curl got back a 200 end everything is ok. + 0) + rc=0 ;; + # Anything else is critical. + *) + rc=2 ;; +esac + +exit $rc diff --git a/src/oc_bifrost/habitat/hooks/init b/src/oc_bifrost/habitat/hooks/init new file mode 100644 index 0000000000..e9c0b5c1a5 --- /dev/null +++ b/src/oc_bifrost/habitat/hooks/init @@ -0,0 +1,7 @@ +#!/bin/bash + +exec 2>&1 + +mkdir -p {{pkg.svc_var_path}}/logs + +source "{{pkg.svc_config_path}}/database-migrations.sh" diff --git a/src/oc_bifrost/habitat/hooks/run b/src/oc_bifrost/habitat/hooks/run new file mode 100644 index 0000000000..45d147e7c1 --- /dev/null +++ b/src/oc_bifrost/habitat/hooks/run @@ -0,0 +1,14 @@ +#!/bin/sh +exec 2>&1 + +export HOME={{pkg.svc_var_path}} +export RUNNER_LOG_DIR="{{pkg.svc_var_path}}/logs" +export RELX_CONFIG_PATH="{{pkg.svc_config_path}}/sys.config" +export VMARGS_PATH="{{pkg.svc_config_path}}/vm.args" +export CHEF_SECRETS_DATA=$(cat {{pkg.svc_config_path}}/veil-secrets.json) + +{{#if cfg.fips_enabled ~}} +export OPENSSL_FIPS=1 +{{/if ~}} + +exec oc_bifrost foreground diff --git a/src/oc_bifrost/habitat/plan.sh b/src/oc_bifrost/habitat/plan.sh new file mode 100644 index 0000000000..eb05a2f492 --- /dev/null +++ b/src/oc_bifrost/habitat/plan.sh @@ -0,0 +1,101 @@ +pkg_name=oc_bifrost +pkg_origin=chef-server +pkg_license=('Apache-2.0') +pkg_maintainer="The Chef Server Maintainers " +pkg_source="nosuchfile.tar.gz" +pkg_deps=( + core/erlang18 + core/cacerts + core/coreutils + core/curl + core/gcc-libs + core/sqitch + chef-server/dbdpg +) +pkg_build_deps=(core/make core/git core/gcc core/bundler core/ruby) +pkg_lib_dirs=(lib) +pkg_include_dirs=(include) +pkg_bin_dirs=(bin) +pkg_description="Erlang implementation of the Chef Server's Auth system." 
+pkg_upstream_url="https://github.com/chef/chef-server" +pkg_exports=( + [port]="port" +) +pkg_exposes=(port) + +pkg_binds_optional=( + [database]="port" + [chef-server-ctl]="secrets" +) + +pkg_version() { + cat "$PLAN_CONTEXT/../../../VERSION" +} + +do_before() { + do_default_before + if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then + exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root." 56 + fi + update_pkg_version +} + +do_download() { + return 0 +} + +do_verify() { + return 0 +} + +do_unpack() { + # Copy everything over to the cache path so we don't write out our compiled + # deps into the working directory, but into the cache directory. + mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname" + cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname" +} + +do_prepare() { + # The `/usr/bin/env` path is hardcoded in jiffy, so we'll add a symlink since fix_interpreter won't work. + if [[ ! -r /usr/bin/env ]]; then + ln -sv "$(pkg_path_for coreutils)/bin/env" /usr/bin/env + _clean_env=true + fi + + # Need this for enterprise_ctl to compile with an old version of rebar + build_line "Setting PATH=$PATH:$HAB_CACHE_SRC_PATH/$pkg_dirname" + export PATH=$PATH:"$HAB_CACHE_SRC_PATH/$pkg_dirname" + + build_line "RELX_OUTPUT_DIR=$pkg_prefix" + export RELX_OUTPUT_DIR=$pkg_prefix + + git config --global http.sslCAInfo \ + "$(pkg_path_for core/cacerts)"/ssl/certs/cacert.pem + + fix_interpreter ./rebar3 core/coreutils bin/env +} + + +do_build() { + _bundler_dir="$(pkg_path_for bundler)" + export REL_VERSION=$pkg_version + export GEM_HOME="${pkg_path}/vendor/bundle" + export GEM_PATH="${_bundler_dir}:${GEM_HOME}" + make omnibus +} + +do_install() { + cp -rv "_build/default/rel/oc_bifrost/"* "${pkg_prefix}" + cp -R "$HAB_CACHE_SRC_PATH/$pkg_dirname/schema" "$pkg_prefix" +} + +do_check() { + make +} + +do_end() { + # Clean up the `env` link, if we set it up. 
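+  # (_clean_env is only set by do_prepare() above, and only when we had to
+  # create the /usr/bin/env symlink ourselves.)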
+ if [[ -n "$_clean_env" ]]; then + rm -fv /usr/bin/env + fi +} diff --git a/src/oc_erchef/Gemfile_habitat b/src/oc_erchef/Gemfile_habitat new file mode 100644 index 0000000000..c9efd84f90 --- /dev/null +++ b/src/oc_erchef/Gemfile_habitat @@ -0,0 +1,5 @@ +source "https://rubygems.org" + +gem 'rest-client' +gem 'pg' +gem 'openssl' diff --git a/src/oc_erchef/Gemfile_habitat.lock b/src/oc_erchef/Gemfile_habitat.lock new file mode 100644 index 0000000000..86a86a91f9 --- /dev/null +++ b/src/oc_erchef/Gemfile_habitat.lock @@ -0,0 +1,31 @@ +GEM + remote: https://rubygems.org/ + specs: + domain_name (0.5.20170404) + unf (>= 0.0.5, < 1.0.0) + http-cookie (1.0.3) + domain_name (~> 0.5) + mime-types (3.1) + mime-types-data (~> 3.2015) + mime-types-data (3.2016.0521) + netrc (0.11.0) + openssl (2.0.6) + pg (0.21.0) + rest-client (2.0.2) + http-cookie (>= 1.0.2, < 2.0) + mime-types (>= 1.16, < 4.0) + netrc (~> 0.8) + unf (0.1.4) + unf_ext + unf_ext (0.0.7.4) + +PLATFORMS + ruby + +DEPENDENCIES + openssl + pg + rest-client + +BUNDLED WITH + 1.15.4 diff --git a/src/oc_erchef/habitat/config/chef_server_data_bootstrap.rb b/src/oc_erchef/habitat/config/chef_server_data_bootstrap.rb new file mode 100644 index 0000000000..936b6a1939 --- /dev/null +++ b/src/oc_erchef/habitat/config/chef_server_data_bootstrap.rb @@ -0,0 +1,407 @@ +require 'fileutils' +require 'restclient' +require 'json' + +class EcPostgres + # Provides a superuser connection to the specified database + def self.with_connection(database = 'template1', opts = {}) + require 'pg' + + postgres = {} +{{#if bind.database}} + {{#eachAlive bind.database.members as |member|}} + {{#if @last}} + postgres['vip']="{{member.sys.ip}}" + postgres['port']="{{member.cfg.port}}" + postgres['db_superuser']="{{member.cfg.superuser_name}}" + postgres['db_superuser_password']="{{member.cfg.superuser_password}}" + {{/if}} + {{/eachAlive}} +{{else}} + postgres['vip']="{{cfg.postgresql.vip}}" + postgres['port']="{{cfg.postgresql.port}}" + postgres['db_superuser']="{{cfg.sql_user}}" + postgres['db_superuser_password']="{{cfg.sql_password}}" +{{/if}} + + connection = nil + + # Some callers expect failure - this gives the option to suppress + # error logging to avoid confusing output. + if opts['silent'] + silent = true + # If a caller specfies silent, it means they anticipate an error to be + # likely. Don't force over a minute of retries in that case. + retries = opts['retries'] || 1 + else + silent = false + retries = opts['retries'] || 5 + end + max_retries = retries + begin + connection = ::PG::Connection.open('user' => postgres['db_superuser'], + 'host' => postgres['vip'], + 'password' => postgres['db_superuser_password'], + 'port' => postgres['port'], + 'dbname' => database) + rescue => e + if retries > 0 + sleep_time = 2**((max_retries - retries)) + retries -= 1 + unless silent + puts "Error from postgresql: #{e.message.chomp}. Retrying after #{sleep_time}s. Retries remaining: #{retries + 1}" + end + sleep sleep_time + retry + else + unless silent + puts "Error from postgresql: #{e.message.chomp}. Retries have been exhausted." + end + raise + end + end + + begin + yield connection + ensure + connection.close + end + end + + def self.as_user(user) + # Find the user in the password database. + u = (user.is_a? Integer) ? 
+
+    old_process_euid = Process.euid
+    Process::UID.eid = u.uid
+    begin
+      yield
+    ensure
+      Process::UID.eid = old_process_euid
+    end
+  end
+end
+
+
+class ChefServerDataBootstrap
+
+  GLOBAL_ORG_ID = "00000000000000000000000000000000"
+  attr_reader :bifrost, :superuser_guid, :superuser_public_key, :superuser_authz_id, :bootstrap_time, :server_admins_authz_id
+
+  def initialize
+    @bootstrap_time = Time.now.utc.to_s
+  end
+
+
+  def bifrost_superuser_id
+    @superuser_id ||= bifrost_superuser_id_from_secrets_file
+  end
+
+  def bifrost_superuser_id_from_secrets_file
+    secrets = JSON.parse(File.read('{{pkg.svc_config_path}}/veil-secrets.json'))
+    secrets['oc_bifrost']['superuser_id']
+  end
+
+  def bootstrap
+    # TODO: Need to cleanly guard that we only do this in one instance of chef-server-ctl
+
+    puts "Bootstrapping Chef Server Data"
+    # This is done in a few stages. First we check whether the pivotal user exists.
+    EcPostgres.with_connection('opscode_chef') do |conn|
+      get_or_create_superuser_in_erchef(conn)
+    end
+
+    # Next we create the bifrost objects and their dependencies. If this
+    # fails, it can be re-run idempotently without risk of causing the
+    # run to fail.
+    users_authz_id = create_container_in_authz(superuser_authz_id)
+    orgs_authz_id = create_container_in_authz(superuser_authz_id)
+    create_server_admins_global_group_in_bifrost(users_authz_id)
+
+    # Put pivotal in the server-admins global group.
+    insert_authz_actor_into_group(server_admins_authz_id, superuser_authz_id)
+
+    # Now that bifrost operations are complete, create the corresponding
+    # objects in erchef. By separating them, we increase the chance that a
+    # bootstrap that failed because of a bifrost error can be recovered
+    # by re-running it.
+    EcPostgres.with_connection('opscode_chef') do |conn|
+      rekey_superuser(conn)
+      create_server_admins_global_group_in_erchef(conn)
+      create_global_container_in_erchef(conn, 'organizations', orgs_authz_id)
+      create_global_container_in_erchef(conn, 'users', users_authz_id)
+    end
+
+    # touch the bootstrapped file
+    # FileUtils.touch '{{pkg.svc_data_path}}/bootstrapped'
+    puts "Chef server successfully bootstrapped"
+  end
+
+  private
+
+  # Create and set up permissions for the server admins group.
+  def create_server_admins_global_group_in_bifrost(users_authz_id)
+    @server_admins_authz_id = create_group_in_authz(bifrost_superuser_id)
+    %w{create read update delete}.each do |permission|
+      # Grant the server admins group permission on the users container,
+      # as the erchef superuser.
+      grant_authz_object_permission(permission, "groups", "containers", users_authz_id,
+                                    server_admins_authz_id, superuser_authz_id)
+      # Grant the superuser actor permissions on the server admins group,
+      # as the bifrost superuser.
+      grant_authz_object_permission(permission, "actors", "groups", server_admins_authz_id,
+                                    superuser_authz_id, bifrost_superuser_id)
+    end
+
+    # Grant server-admins read permissions on itself as the bifrost superuser.
+    grant_authz_object_permission("read", "groups", "groups", server_admins_authz_id,
+                                  server_admins_authz_id, bifrost_superuser_id)
+  end
+
+  # Insert the server admins global group into the erchef groups table.
+  def create_server_admins_global_group_in_erchef(conn)
+    # We re-use server_admins_authz_id as the primary key id here; that is
+    # a shortcut and may prove problematic.
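+    # Illustration (hypothetical values): if server_admins_authz_id were
+    # "beefbeef...", the inserted row would carry it in both columns:
+    #   id = "beefbeef...", authz_id = "beefbeef...",
+    #   org_id = GLOBAL_ORG_ID, name = 'server-admins'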
+    simple_insert(conn, 'groups', server_admins_authz_id,
+                  id: server_admins_authz_id,
+                  org_id: GLOBAL_ORG_ID,
+                  authz_id: server_admins_authz_id,
+                  name: 'server-admins',
+                  last_updated_by: superuser_authz_id,
+                  created_at: bootstrap_time,
+                  updated_at: bootstrap_time)
+  end
+
+  # Insert the erchef superuser's key into the erchef keys table,
+  # and the user record into the users table.
+  # As a side effect it sets superuser_authz_id.
+  def get_or_create_superuser_in_erchef(conn)
+    require 'openssl'
+    load_superuser_public_key()
+    user = get_superuser(conn)
+
+    if user.nil?
+      # Create it.
+      # TODO Guard against multiple instances racing
+
+      # This can only be created when initializing a system, and should
+      # never change afterwards.
+      @superuser_authz_id = create_actor_in_authz(bifrost_superuser_id)
+      user_id = SecureRandom.uuid.gsub("-", "")
+
+      user = {id: user_id,
+              username: 'pivotal',
+              email: 'root@localhost.localdomain',
+              authz_id: @superuser_authz_id,
+              created_at: bootstrap_time,
+              updated_at: bootstrap_time,
+              last_updated_by: bifrost_superuser_id,
+              public_key: superuser_public_key,
+              pubkey_version: 0, # Old constraint requires it to be not-null
+              serialized_object: JSON.generate(
+                first_name: "Chef",
+                last_name: "Server",
+                display_name: "Chef Server Superuser"),
+              admin: false,
+              recovery_auth_enabled: false
+      }
+
+      add_user_keys = %i{id authz_id username email public_key pubkey_version hashed_pw
+                         salt hash_type last_updated_by created_at updated_at
+                         external_auth_uid recovery_auth_enabled serialized_object admin}
+
+      sql = %{
+        SELECT add_user(#{make_placeholders(add_user_keys.length)})
+      }
+      args = extract_args_in_order(add_user_keys, user)
+
+      result = conn.exec_params(sql, args)
+
+      if result[0]["add_user"] == "1"
+        puts "Create superuser succeeded"
+      else
+        puts "Create superuser failed"
+        exit(1)
+      end
+
+      user = get_superuser(conn)
+    end
+    @superuser_authz_id = user[:authz_id]
+    @superuser_guid = user[:id]
+    user
+  end
+
+  def rekey_superuser(conn)
+    load_superuser_public_key()
+    # We assume the default key exists, because add_user above guarantees it.
+    sql = %{
+      UPDATE keys SET (public_key, created_at, expires_at)
+        = ('#{@superuser_public_key}', '#{bootstrap_time}', 'infinity')
+      WHERE id = '#{superuser_guid}' AND key_name = 'default'
+    }
+
+    result = conn.exec(sql)
+    puts "Superuser key update successful: #{result.cmd_status}"
+  end
+
+  def create_global_container_in_erchef(conn, name, authz_id)
+    simple_insert(conn, 'containers', authz_id,
+                  id: authz_id, # TODO is this right?
+                  name: name,
+                  authz_id: authz_id,
+                  org_id: GLOBAL_ORG_ID,
+                  last_updated_by: superuser_authz_id,
+                  created_at: bootstrap_time,
+                  updated_at: bootstrap_time)
+  end
+
+  # DB helper to construct and execute a simple insert statement
+  def simple_insert(conn, table, pkey, fields)
+    placeholders = make_placeholders(fields.length)
+    begin
+      puts "Bootstrapping superuser data into chef's #{table} table"
+      sql = %{
+        INSERT INTO #{table} (#{fields.keys.join(", ")})
+          VALUES (#{placeholders})
+      }
+      conn.exec_params(sql, fields.values)
+    rescue PG::UniqueViolation => e
+      # puts "Got UniqueViolation #{e.inspect}"
+      puts "Pre-existing superuser record found in chef's #{table} table, attempting to update"
+      sql = %{
+        UPDATE #{table} SET (#{fields.keys.join(", ")})
+          = (#{placeholders}) WHERE id = '#{pkey}'
+      }
+      conn.exec_params(sql, fields.values)
+      puts "Update successful"
+    end
+  end
+
+  def get_superuser(conn)
+    users = begin
+      sql = %{
+        SELECT * FROM users WHERE username = 'pivotal'
+      }
+      result = conn.exec(sql)
+      Hash[result[0].map{ |k, v| [k.to_sym, v] }]
+    rescue
+      nil
+    end
+    users
+  end
+
+  def make_placeholders(count)
+    (1..count).map {|x| "$#{x}"}.join(", ")
+  end
+
+  def extract_args_in_order(keys, hash)
+    keys.map {|k| hash.has_key?(k) ? hash[k] : nil}
+  end
+
+
+  ## Bifrost access helpers.
+
+  def create_group_in_authz(requestor_id)
+    create_object_in_authz("groups", requestor_id)
+  end
+
+  def create_actor_in_authz(requestor_id)
+    create_object_in_authz("actors", requestor_id)
+  end
+
+  def create_container_in_authz(requestor_id)
+    create_object_in_authz("containers", requestor_id)
+  end
+
+  def create_object_in_authz(object_name, requestor_id)
+    result = bifrost_request(:post, "#{object_name}", "{}", requestor_id)
+    JSON.parse(result)["id"]
+  end
+
+  # Tells bifrost that an actor is a member of a group.
+  # Group membership is managed through bifrost, and not via erchef.
+  def insert_authz_actor_into_group(group_id, actor_id)
+    bifrost_request(:put, "/groups/#{group_id}/actors/#{actor_id}", "{}", superuser_authz_id)
+  end
+
+  def grant_authz_object_permission(permission_type, granted_to_object_type, granted_on_object_type, granted_on_id, granted_to_id, requestor_id)
+    url = "#{granted_on_object_type}/#{granted_on_id}/acl/#{permission_type}"
+    body = JSON.parse(bifrost_request(:get, url, nil, requestor_id))
+    body[granted_to_object_type] << granted_to_id
+    bifrost_request(:put, url, body.to_json, requestor_id)
+  end
+
+  # Assembles the appropriate headers per bifrost's expectations.
+  # This automatically retries any failed request. It is not uncommon
+  # for bifrost to be unavailable when we're ready to start, as
+  # it's still spinning up.
+  def bifrost_request(method, rel_path, body, requestor_id)
+    headers = {
+      :content_type => :json,
+      :accept => :json,
+      'X-Ops-Requesting-Actor-Id' => requestor_id
+    }
+    retries = 5
+    begin
+      bifrost = load_bifrost
+      if method == :get
+        RestClient.get("http://#{bifrost['vip']}:#{bifrost['port']}/#{rel_path}", headers)
+      else
+        RestClient.send(method, "http://#{bifrost['vip']}:#{bifrost['port']}/#{rel_path}", body, headers)
+      end
+    rescue RestClient::Exception, Errno::ECONNREFUSED => e
+      error = e.respond_to?(:response) ? e.response.chomp : e.message
+      if retries > 0
+        sleep_time = 2**(5 - retries)
+        retries -= 1
+        puts "Error from bifrost: #{error}, retrying after #{sleep_time}s. Retries remaining: #{retries}"
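+        # With the initial retries = 5, the successive sleep_time values
+        # are 1, 2, 4, 8 and 16 seconds, i.e. roughly 31s of backoff in
+        # total before giving up.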
+        sleep sleep_time
+        retry
+      else
+        puts "Error from bifrost: #{error}, retries have been exhausted"
+        raise
+      end
+    end
+  end
+
+
+  # These are factored out and put at the end because mustache confuses my
+  # editor's Ruby mode.
+  def load_superuser_public_key()
+{{#if bind.chef-server-ctl}}
+  {{~ #eachAlive bind.chef-server-ctl.members as |member|}}
+    {{~ #if @last}}
+    @superuser_public_key = <<-EOF
+{{ member.cfg.secrets.chef-server.superuser_pub_key }}
+EOF
+    {{~ /if}}
+  {{~ /eachAlive}}
+{{else}}
+  @superuser_public_key = "DUMMY KEY FROM BOOTSTRAP"
+{{/if}}
+  end
+
+  def load_bifrost()
+    bifrost={}
+{{#if bind.oc_bifrost}}
+  {{#eachAlive bind.oc_bifrost.members as |member|}}
+    {{#if @last}}
+    bifrost['vip']="{{member.sys.ip}}"
+    bifrost['port']="{{member.cfg.port}}"
+    {{/if}}
+  {{/eachAlive}}
+{{else}}
+    bifrost['vip']="{{cfg.oc_bifrost.vip}}"
+    bifrost['port']="{{cfg.oc_bifrost.port}}"
+{{/if}}
+    bifrost
+  end
+
+end
+
+#if File.exist?('{{pkg.svc_data_path}}/bootstrapped')
+#  puts 'Chef Server Data already bootstrapped - Skipping.'
+#else
+  ChefServerDataBootstrap.new.bootstrap
+#end
diff --git a/src/oc_erchef/habitat/config/dark_launch_features.json b/src/oc_erchef/habitat/config/dark_launch_features.json
new file mode 100644
index 0000000000..762e7a36d6
--- /dev/null
+++ b/src/oc_erchef/habitat/config/dark_launch_features.json
@@ -0,0 +1,16 @@
+{
+  "quick_start": false,
+  "new_theme": true,
+  "private-chef": true,
+  "sql_users": true,
+  "add_type_and_bag_to_items": true,
+  "reporting": true,
+  "actions": true,
+  "503_mode": false,
+  "couchdb_containers": false,
+  "couchdb_groups": false,
+  "couchdb_acls": false,
+  "couchdb_association_requests": false,
+  "couchdb_organizations": false,
+  "couchdb_associations": false
+}
diff --git a/src/oc_erchef/habitat/config/database-migrations.sh b/src/oc_erchef/habitat/config/database-migrations.sh
new file mode 100644
index 0000000000..2234a85222
--- /dev/null
+++ b/src/oc_erchef/habitat/config/database-migrations.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# TODO: not sure how to handle this. Sqitch bombs when it can't find the timezone
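+# (Likely cause, not verified: sqitch is Perl, and DateTime::TimeZone cannot
+# resolve a local timezone inside the minimal hab environment; pinning TZ
+# below works around that.)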
+export TZ="UTC"
+
+export PERL5LIB="$(cat "$(hab pkg path "chef-server/dbdpg")/LD_RUN_PATH")"
+export PATH=$PATH:$(hab pkg path "core/postgresql")/bin:$(hab pkg path "core/sqitch")/bin
+
+{{#if bind.database}}
+  {{#eachAlive bind.database.members as |member|}}
+    {{#if @last}}
+HOST="{{member.sys.ip}}"
+PORT="{{member.cfg.port}}"
+USER="{{member.cfg.superuser_name}}"
+PASS="{{member.cfg.superuser_password}}"
+DB="opscode_chef"
+    {{/if}}
+  {{/eachAlive}}
+{{else}}
+HOST="{{cfg.postgresql.vip}}"
+PORT="{{cfg.postgresql.port}}"
+USER="{{cfg.sql_user}}"
+PASS="{{cfg.sql_password}}"
+DB="opscode_chef"
+{{/if}}
+
+PG_ARGS="--host $HOST --port $PORT --username $USER"
+export PGPASSWORD="$PASS"
+
+# Wait until postgres is ready (sleep between checks to avoid a busy loop)
+until pg_isready $PG_ARGS --quiet; do sleep 1; done
+
+# Create the opscode_chef database for sqitch to deploy to
+createdb $PG_ARGS $DB "oc_bifrost"
+
+# Install uuid-ossp extension
+psql $PG_ARGS --command 'CREATE EXTENSION IF NOT EXISTS "uuid-ossp"' $DB
+
+cd "{{pkg.path}}/schema/baseline"
+sqitch --quiet --engine pg deploy "db:pg://${USER}:${PASS}@${HOST}/$DB"
+
+cd "{{pkg.path}}/schema"
+sqitch --quiet --engine pg deploy "db:pg://${USER}:${PASS}@${HOST}/$DB"
diff --git a/src/oc_erchef/habitat/config/elasticsearch-index-init.json b/src/oc_erchef/habitat/config/elasticsearch-index-init.json
new file mode 100644
index 0000000000..dd40151b4d
--- /dev/null
+++ b/src/oc_erchef/habitat/config/elasticsearch-index-init.json
@@ -0,0 +1,57 @@
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "default": {
+          "type": "whitespace"
+        }
+      }
+    },
+    "number_of_shards": 3,
+    "number_of_replicas": 2
+  },
+  "mappings": {
+    "object": {
+      "_source": {
+        "enabled": false
+      },
+      "_all": {
+        "enabled": false
+      },
+      "properties": {
+        "X_CHEF_database_CHEF_X": {
+          "type": "string",
+          "index": "not_analyzed",
+          "norms": {
+            "enabled": false
+          }
+        },
+        "X_CHEF_type_CHEF_X": {
+          "type": "string",
+          "index": "not_analyzed",
+          "norms": {
+            "enabled": false
+          }
+        },
+        "X_CHEF_id_CHEF_X": {
+          "type": "string",
+          "index": "not_analyzed",
+          "norms": {
+            "enabled": false
+          }
+        },
+        "data_bag": {
+          "type": "string",
+          "index": "not_analyzed",
+          "norms": {
+            "enabled": false
+          }
+        },
+        "content": {
+          "type": "string",
+          "index": "analyzed"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/src/oc_erchef/habitat/config/elasticsearch-init.sh b/src/oc_erchef/habitat/config/elasticsearch-init.sh
new file mode 100644
index 0000000000..73d6c4f0be
--- /dev/null
+++ b/src/oc_erchef/habitat/config/elasticsearch-init.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+export PATH=$PATH:$(hab pkg path "core/curl")/bin
+
+{{#if bind.elasticsearch}}
+  {{#eachAlive bind.elasticsearch.members as |member|}}
+    {{#if @last}}
+HOST="{{member.sys.ip}}"
+PORT="{{member.cfg.http-port}}"
+    {{/if}}
+  {{/eachAlive}}
+{{else}}
+HOST="{{cfg.elasticsearch.vip}}"
+PORT="{{cfg.elasticsearch.port}}"
+{{/if}}
+
+curl -sS --retry 10 --retry-delay 2 --retry-connrefused -XPUT http://${HOST}:${PORT}/chef/ -d @{{pkg.svc_config_path}}/elasticsearch-index-init.json
diff --git a/src/oc_erchef/habitat/config/private-chef-secrets.json b/src/oc_erchef/habitat/config/private-chef-secrets.json
new file mode 100644
index 0000000000..0967ef424b
--- /dev/null
+++ b/src/oc_erchef/habitat/config/private-chef-secrets.json
@@ -0,0 +1 @@
+{}
diff --git a/src/oc_erchef/habitat/config/sys.config b/src/oc_erchef/habitat/config/sys.config
new file mode 100644
index 0000000000..706782e08f
--- /dev/null
+++ b/src/oc_erchef/habitat/config/sys.config
@@ -0,0 +1,358 @@
+%% -*- mode: erlang -*-
+%% -*- tab-width: 4;erlang-indent-level: 4;indent-tabs-mode: nil -*-
+%% ex: ts=4 sw=4 ft=eruby.erlang et
+[
+    {kernel, [{inet_dist_use_interface, {127,0,0,1}}]},
+    %% SASL config
+
+    {sasl, [
+        {sasl_error_logger, {file, "{{pkg.svc_var_path}}/logs/sasl-error.log"}},
+        {errlog_type, error},
+        {error_logger_mf_dir, "{{pkg.svc_var_path}}/logs"}, % Log directory
+        {error_logger_mf_maxbytes, 104857600},
+        {error_logger_mf_maxfiles, 10}
+    ]},
+
+    {lager, [
+        {handlers, [
+            {lager_console_backend, [info, {lager_default_formatter, [ "[", severity, "] ", message, "\n"]}]},
+            {lager_file_backend, [
+                {file, "{{pkg.svc_var_path}}/logs/erchef.log"},
+                {level, info},
+                {size, 104857600},
+                {date, "$D0"},
+                {count, 10},
+                {formatter_config, [date, " ", time, " [", severity, "] ", message, "\n"]}
+            ]}
+        ]},
+
+        %% Whether to write a crash log, and where.
+        %% Commented/omitted/undefined means no crash logger.
+        {crash_log, "{{pkg.svc_var_path}}/logs/crash.log"},
+
+        %% Maximum size in bytes of events in the crash log - defaults to 65536
+        {crash_log_msg_size, 65536},
+
+        %% Maximum size of the crash log in bytes, before it's rotated; set
+        %% to 0 to disable rotation - default is 0
+        {crash_log_size, 104857600},
+
+        %% What time to rotate the crash log - default is no time
+        %% rotation. See the lager README for a description of this format:
+        %% https://github.com/basho/lager/blob/master/README.org
+        {crash_log_date, "$D0"},
+
+        %% Number of rotated crash logs to keep, 0 means keep only the
+        %% current one - default is 0
+        {crash_log_count, 10},
+
+        %% Whether to redirect error_logger messages into lager - defaults to true
+        {error_logger_redirect, true},
+
+        %% Bump up the "high-water mark" (default 50), which is the
+        %% number of messages per second allowed to come from
+        %% error_logger. This is the same as used by
+        %% opscode-chef-mover, FWIW.
+        {error_logger_hwm, 1000}
+    ]},
+
+    {chef_secrets, [
+        {provider, chef_secrets_env}
+    ]},
+
+    {darklaunch, [
+        {config, "{{pkg.svc_data_path}}/dark_launch_features.json" },
+        {reload_time, 10000 }
+    ]},
+
+    {oc_chef_wm, [
+{{~#with cfg/oc_chef_wm}}
+        {ip_mode, [ {{../cfg.private_chef.ip_mode}} ] },
+        {api_version, "{{api_version}}" },
+        {server_flavor, "cs" },
+
+        {default_orgname, undefined },
+
+        {ip, "{{listen_ip}}"},
+        {port, {{port}} },
+        {reqid_header_name, "X-Request-Id"},
+        {auth_skew, {{auth_skew}} },
+        %% currently only used by the search endpoint to bound
+        %% how many nodes are deserialized at a time in
+        %% preparing a response.
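+        %% For example: with the default bulk_fetch_batch_size of 5, a
+        %% search matching 1000 nodes is deserialized in 200 batches of 5,
+        %% bounding peak memory use at the cost of more round trips.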
+        {bulk_fetch_batch_size, {{bulk_fetch_batch_size}} },
+        {superusers, [<<"pivotal">>]},
+        %% metrics config
+        {root_metric_key, "chefAPI"},
+
+        {authz_timeout, 2000},
+        {authz_fanout, 20},
+
+        {reindex_batch_size, {{reindex_batch_size}} },
+
+        {enable_actions, false},
+        {actions_fqdn, <<"localhost">>},
+        {ldap, []},
+        %% these are used for reporting on license status on the
+        %% license endpoint; it would have been nice to give these
+        %% their own logical section, but erlang requires these to
+        %% be part of a loaded application
+        {node_license, {{node_license}} },
+        {upgrade_url, <<"http://www.chef.io/contact/on-premises-simple">>},
+        {max_request_size, {{max_request_size}} },
+        {server_version, "{{server_version}}"},
+        {health_ping_timeout, 400},
+        {health_ping_modules, [
+            oc_chef_authz,
+            chef_sql,
+            chef_elasticsearch
+        ]},
+        {base_resource_url, host_header},
+        {strict_search_result_acls, {{strict_search_result_acls}} },
+
+        {rabbitmq, [
+            {management,[
+                {user, "rabbitmgmt"},
+                {port, 15672},
+                % rabbitmq management http connection pool
+                {rabbitmq_actions_management_service, [
+                    {root_url, "https://127.0.0.1:15672/api"},
+                    {timeout, 30000},
+                    {init_count, 25},
+                    {max_count, 100},
+                    {cull_interval, {60, sec}},
+                    {max_age, {70, sec}},
+                    {max_connection_duration, {70, sec}},
+
+                    {ibrowse_options, [
+                        {connect_timeout, 10000}
+                    ]}
+                ]}
+            ]},
+            {monitoring, [
+                {queue_length_monitor_enabled, false},
+                {queue_length_monitor_vhost, "/analytics"},
+                {queue_length_monitor_queue, "alaska"},
+                {queue_length_monitor_millis, 30000},
+                {queue_length_monitor_timeout_millis, 5000},
+                {drop_on_full_capacity, true},
+                {prevent_erchef_startup_on_full_capacity, false},
+                {queue_at_capacity_affects_overall_status, false}
+            ]}
+        ]}
+{{/with}}
+    ]},
+
+    {chef_authn, [
+        {secrets_module, {chef_secrets, get, [{default, [<<"chef-server">>, <<"webui_pub_key">>]}]}},
+        {keygen_cache_workers, {{cfg.chef_authn.keygen_cache_workers}} },
+        {keygen_cache_size, {{cfg.chef_authn.keygen_cache_size}} },
+        {keygen_start_size, {{cfg.chef_authn.keygen_start_size}} },
+        {keygen_timeout, {{cfg.chef_authn.keygen_timeout}} },
+        {keygen_size, 2048}
+    ]},
+
+    {oc_chef_authz, [
+{{#if bind.oc_bifrost ~}}
+  {{#eachAlive bind.oc_bifrost.members as |member| ~}}
+    {{#if @last ~}}
+        {authz_root_url, "http://{{member.sys.ip}}:9463" },
+        {authz_service, [
+            {root_url, "http://{{member.sys.ip}}:9463" },
+            {timeout, 2000},
+            {init_count, 100},
+            {max_count, 100},
+            {queue_max, 200},
+            {cull_interval, {1, min}},
+            {max_age, {70, sec}},
+            {max_connection_duration, {70, sec}},
+            {ibrowse_options, [{connect_timeout, 5000}]}
+        ]},
+    {{/if ~}}
+  {{/eachAlive ~}}
+{{else ~}}
+        {authz_root_url, "{{cfg.oc_bifrost_url}}" },
+        {authz_service, [
+            {root_url, "{{cfg.oc_bifrost_url}}" },
+            {timeout, 2000},
+            {init_count, 100},
+            {max_count, 100},
+            {queue_max, 200},
+            {cull_interval, {1, min}},
+            {max_age, {70, sec}},
+            {max_connection_duration, {70, sec}},
+            {ibrowse_options, [{connect_timeout, 5000}]}
+        ]},
+{{/if ~}}
+        {cleanup_batch_size, 0}
+    ]},
+
+    {chef_db, [
+        {bulk_fetch_batch_size, 5}
+    ]},
+
+%% needs revisiting if we want a rabbitmq-based search pipeline
+    {chef_index, [
+        {ip_mode, [ ipv4 ] },
+        {rabbitmq_host, "127.0.0.1"},
+        {rabbitmq_port, 5672 },
+        {rabbitmq_user, <<"chef">>},
+        {rabbitmq_vhost, <<"/chef">>},
+        {rabbitmq_exchange, <<"">>},
+        {search_provider, elasticsearch},
+        {search_queue_mode, batch},
+        {search_batch_max_size, 5000000},
+        {search_batch_max_wait, 10},
+        {reindex_sleep_min_ms, 500},
+        {reindex_sleep_max_ms, 2000},
+        {reindex_item_retries,
3},
+        {solr_elasticsearch_major_version, 5},
+        {solr_service, [
+{{#if bind.elasticsearch ~}}
+  {{#eachAlive bind.elasticsearch.members as |member| ~}}
+    {{#if @last ~}}
+            {root_url, "http://{{member.sys.ip}}:{{member.cfg.http-port}}"},
+    {{/if ~}}
+  {{/eachAlive ~}}
+{{else ~}}
+            {root_url, "http://{{cfg.elasticsearch_url}}"},
+{{/if ~}}
+            {timeout, 30000},
+            {init_count, 25},
+            {max_count, 100},
+            {cull_interval, {1, min}},
+            {max_age, {70, sec}},
+            {max_connection_duration, {70, sec}},
+            {ibrowse_options, [{connect_timeout, 10000}]}
+        ]},
+        {rabbitmq_index_management_service, [
+            {enabled, false}, %% disabled for now in habitat world
+            {user, "rabbitmgmt"},
+            {root_url, "https://127.0.0.1:15672/api"},
+            {timeout, 30000},
+            {init_count, 25},
+            {max_count, 100},
+            {cull_interval, {60, sec}},
+            {max_age, {70, sec}},
+            {max_connection_duration, {70, sec}},
+
+            {ibrowse_options, [
+                {connect_timeout, 10000}
+            ]}
+        ]}
+    ]},
+
+    {chef_objects, [
+{{#if bind.bookshelf ~}}
+  {{#eachAlive bind.bookshelf.members as |member| ~}}
+    {{#if @last ~}}
+        {s3_url, "http://{{member.sys.ip}}:{{member.cfg.port}}"},
+    {{/if ~}}
+  {{/eachAlive ~}}
+{{else ~}}
+        {s3_url, "http://bookshelf:4321"},
+{{/if ~}}
+        {s3_external_url, host_header},
+        {s3_platform_bucket_name, "bookshelf"},
+        {s3_url_ttl, 28800},
+        {s3_url_expiry_window_size, {100, percent}},
+        {s3_parallel_ops_timeout, 5000},
+        {s3_parallel_ops_fanout, 10},
+        {depsolver_timeout, 20000},
+        {depsolver_pooler_timeout, 0}
+    ]},
+{{#if cfg.data_collector.enabled ~}}
+    {data_collector, [
+        {root_url, "https://{{cfg.data_collector.server}}:{{cfg.data_collector.port}}/data-collector/v0/"},
+        {timeout, 30000},
+        {init_count, 25},
+        {max_count, 100},
+        {cull_interval, {1, min}},
+        {max_age, {70, sec}},
+        {max_connection_duration, {70, sec}},
+        {ibrowse_options, [{connect_timeout, 10000}]}
+    ]},
+{{/if ~}}
+    {stats_hero, [
+        {udp_socket_pool_size, 20 },
+        {protocol, estatsd},
+        {estatsd_host, "127.0.0.1"},
+        {estatsd_port, 9466}
+    ]},
+
+    {opscoderl_httpc, [
+        {pooler_timeout, 2000}
+    ]},
+
+    {sqerl, [
+        {db_driver_mod, sqerl_pgsql_client},
+        {ip_mode, [ ipv4 ] },
+        %% {config_cb, {chef_secrets_sqerl, config, [{<<"opscode_erchef">>, <<"sql_password">>}]}},
+        %% Database connection parameters
+{{#if bind.database ~}}
+  {{#eachAlive bind.database.members as |member| ~}}
+    {{#if @last ~}}
+        {db_host, "{{member.sys.ip}}"},
+        {db_port, {{member.cfg.port}} },
+        {db_user, "{{member.cfg.superuser_name}}"},
+        {db_pass, "{{member.cfg.superuser_password}}" },
+    {{/if ~}}
+  {{/eachAlive ~}}
+{{else ~}}
+        {db_host, "{{cfg.database_host}}"},
+        {db_port, 5432},
+        {db_user, "hab"},
+        {db_pass, "chefrocks" },
+{{/if ~}}
+        {db_name, "opscode_chef" },
+        {idle_check, 10000},
+        {pooler_timeout, 2000},
+        {db_timeout, 5000},
+        {prepared_statements, {oc_chef_sql, statements, [pgsql]}},
+        {column_transforms, [
+            {<<"created_at">>, {sqerl_transformers, convert_YMDHMS_tuple_to_datetime}},
+            {<<"updated_at">>, {sqerl_transformers, convert_YMDHMS_tuple_to_datetime}}
+        ]}
+    ]},
+
+    {webmachine, [
+        {log_handlers, [
+            {oc_wm_request_logger, [
+                {file, "{{pkg.svc_var_path}}/logs/requests.log"},
+                {file_size, 100}, %% Size in MB
+                {files, 10},
+                {annotations, [req_id, org_name, msg, darklaunch, perf_stats, user, req_api_version]}
+            ]}
+        ]}
+    ]},
+
+    {ibrowse, [
+        {default_max_sessions, 256},
+        {default_max_pipeline_size, 1}
+    ]},
+
+    {pooler, [
+        {pools, [
+{{~#with cfg/pooler/sqerl }}
+        [{name, sqerl},
+            {max_count, {{max_count}}},
+            {init_count, {{init_count}} },
+            {queue_max, {{queue_max}} },
+            {start_mfa, {sqerl_client, start_link, []}}
+        ],
+{{~/with }}
+{{~#with cfg/pooler/chef_depsolver }}
+        [{name, chef_depsolver},
+            {max_count, {{max_count}}},
+            {init_count, {{init_count}} },
+            {queue_max, {{queue_max}} },
+            {start_mfa, {chef_depsolver_worker, start_link, []}}
+        ]
+{{~/with }}
+        ]},
+        {metrics_module, folsom_metrics}
+    ]},
+    {ssl, [
+        {session_cb, noop_session_cache}
+    ]}
+].
diff --git a/src/oc_erchef/habitat/config/veil-secrets.json b/src/oc_erchef/habitat/config/veil-secrets.json
new file mode 100644
index 0000000000..cc9ade9211
--- /dev/null
+++ b/src/oc_erchef/habitat/config/veil-secrets.json
@@ -0,0 +1,7 @@
+{{~ #if bind.chef-server-ctl}}
+  {{~ #eachAlive bind.chef-server-ctl.members as |member|}}
+    {{~ #if @last}}
+{{toJson member.cfg.secrets}}
+    {{~ /if}}
+  {{~ /eachAlive}}
+{{~ /if}}
diff --git a/src/oc_erchef/habitat/config/vm.args b/src/oc_erchef/habitat/config/vm.args
new file mode 100644
index 0000000000..08e24addbd
--- /dev/null
+++ b/src/oc_erchef/habitat/config/vm.args
@@ -0,0 +1,38 @@
+## Name of the node
+-name erchef@127.0.0.1
+
+## Cookie for distributed erlang
+-setcookie erchef
+
+## Enable SMP even on single-core machines.
+## The Erchef keygen cache feature uses
+## enif_send, which does not work on single-
+## core machines unless SMP is enabled.
+-smp enable
+
+## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
+## (Disabled by default... use with caution!)
+##-heart
+
+## Enable kernel poll and a few async threads
++K true
++A 5
+
+## Increase number of concurrent ports/sockets
+-env ERL_MAX_PORTS 4096
+
+## Add directory for hot patches to the front of the code path
+-pa lib/patches
+
+## Tweak GC to run more often
+-env ERL_FULLSWEEP_AFTER 10
+
+## Increase logfile size to 10M
+-env RUN_ERL_LOG_MAXSIZE 10000000
+
+{{#if cfg.fips_enabled ~}}
+## Runtime switches to enable loading custom crypto module
+## that supports OpenSSL-FIPS
+-env ERLANG_CRYPTO2_PATH {{pkg.path}}/lib/erlang-crypto2/priv
+-pa {{pkg.path}}/lib/erlang-crypto2/ebin
+{{/if ~}}
diff --git a/src/oc_erchef/habitat/default.toml b/src/oc_erchef/habitat/default.toml
new file mode 100644
index 0000000000..e61c6abdd9
--- /dev/null
+++ b/src/oc_erchef/habitat/default.toml
@@ -0,0 +1,70 @@
+[private_chef]
+ip_mode="ipv4"
+fips_enabled=false
+
+[lager]
+lager_console_backend="info"
+error_logger_hwm=50
+
+[oc_chef_wm]
+api_version="12.0.0"
+listen_ip="0.0.0.0"
+port=8000
+auth_skew=900
+bulk_fetch_data_size=5
+reindex_batch_size=10
+node_license=999999
+max_request_size=1000000
+server_version=""
+bulk_fetch_batch_size=5
+strict_search_result_acls=false
+
+[chef_authn]
+keygen_cache_workers=10
+keygen_cache_size=10
+keygen_start_size=0
+keygen_timeout=1000
+
+[oc_chef_authz]
+
+[chef_db]
+
+[chef_index]
+
+[chef_objects]
+
+[data_collector]
+enabled=false
+server="automate-server.test"
+port="443"
+
+[stats_hero]
+
+[opscoderl_httpc]
+pooler_timeout=""
+
+[sqerl]
+db_host="localhost"
+db_port=5432
+db_user="opscode_chef"
+db_pass=""
+db_name="opscode_chef"
+db_pooler_timeout=2000
+sql_db_timeout=5000
+
+[webmachine]
+
+[ibrowse]
+default_max_sessions=""
+default_max_pipeline_size=""
+
+[pooler.sqerl]
+max_count=100
+init_count=20
+queue_max=20
+
+[pooler.chef_depsolver]
+max_count=5
+init_count=5
+queue_max=50
+
diff --git a/src/oc_erchef/habitat/hooks/health_check b/src/oc_erchef/habitat/hooks/health_check
new file mode 100644
index 0000000000..65bb88999b
--- /dev/null
+++ b/src/oc_erchef/habitat/hooks/health_check
@@ -0,0 +1,20 @@
+#!/bin/sh
+#
+
+# Health Check for oc_erchef
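+# Habitat treats health_check exit codes as: 0 = ok, 1 = warning,
+# 2 = critical, 3 = unknown. This hook only ever reports ok or critical.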
+
+# default return code is 0
+rc=0
+
+{{pkgPathFor "core/curl"}}/bin/curl -X GET -sS --fail --max-time 2 http://localhost:8000/_status
+
+case $? in
+  # Zero exit status means curl got back a 200 and everything is ok.
+  0)
+    rc=0 ;;
+  # Anything else is critical.
+  *)
+    rc=2 ;;
+esac
+
+exit $rc
diff --git a/src/oc_erchef/habitat/hooks/init b/src/oc_erchef/habitat/hooks/init
new file mode 100644
index 0000000000..8c81e1abd9
--- /dev/null
+++ b/src/oc_erchef/habitat/hooks/init
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+exec 2>&1
+
+mkdir -p {{pkg.svc_var_path}}/logs
+
+source "{{pkg.svc_config_path}}/elasticsearch-init.sh"
+source "{{pkg.svc_config_path}}/database-migrations.sh"
+cd {{pkg.path}} && bundle exec ruby "{{pkg.svc_config_path}}/chef_server_data_bootstrap.rb"
diff --git a/src/oc_erchef/habitat/hooks/reconfigure b/src/oc_erchef/habitat/hooks/reconfigure
new file mode 100644
index 0000000000..ce8305d414
--- /dev/null
+++ b/src/oc_erchef/habitat/hooks/reconfigure
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+cd {{pkg.path}} && bundle exec ruby "{{pkg.svc_config_path}}/chef_server_data_bootstrap.rb"
diff --git a/src/oc_erchef/habitat/hooks/run b/src/oc_erchef/habitat/hooks/run
new file mode 100644
index 0000000000..40b068dca8
--- /dev/null
+++ b/src/oc_erchef/habitat/hooks/run
@@ -0,0 +1,21 @@
+#!/bin/sh
+exec 2>&1
+
+export HOME={{pkg.svc_var_path}}
+export RELX_CONFIG_PATH="{{pkg.svc_config_path}}/sys.config"
+export VMARGS_PATH="{{pkg.svc_config_path}}/vm.args"
+export CHEF_SECRETS_DATA=$(cat {{pkg.svc_config_path}}/veil-secrets.json)
+# these are needed in order to make depselector start up
+export PATH={{pkgPathFor "core/ruby"}}/bin:{{pkgPathFor "core/bundler"}}/bin:${PATH}
+export LD_LIBRARY_PATH={{pkgPathFor "core/libffi"}}/lib:${LD_LIBRARY_PATH}
+export GEM_HOME={{pkgPathFor "core/bundler"}}
+export GEM_PATH={{pkgPathFor "core/bundler"}}
+
+{{#if cfg.fips_enabled ~}}
+export OPENSSL_FIPS=1
+{{/if ~}}
+
+# The svc user is non-root, and it needs write access to this file.
+cp {{pkg.svc_config_path}}/dark_launch_features.json {{pkg.svc_data_path}}
+
+exec oc_erchef foreground
diff --git a/src/oc_erchef/habitat/plan.sh b/src/oc_erchef/habitat/plan.sh
new file mode 100644
index 0000000000..7ce3a3e120
--- /dev/null
+++ b/src/oc_erchef/habitat/plan.sh
@@ -0,0 +1,119 @@
+pkg_name=oc_erchef
+pkg_origin=chef-server
+pkg_license=('Apache-2.0')
+pkg_maintainer="The Chef Server Maintainers "
+pkg_deps=(
+  core/erlang18
+  core/cacerts
+  core/coreutils
+  core/curl
+  core/openssl/1.0.2l/20171014213633
+  core/gcc-libs
+  core/ruby
+  core/sqitch
+  core/gecode
+  core/bundler
+  chef-server/dbdpg
+)
+pkg_build_deps=(core/make core/git core/gcc)
+pkg_lib_dirs=(lib)
+pkg_include_dirs=(include)
+pkg_bin_dirs=(bin)
+pkg_description="Erlang implementation of the Chef Server's REST API."
+pkg_upstream_url="https://github.com/chef/chef-server"
+
+pkg_exposes=(port)
+pkg_exports=(
+  [port]=oc_chef_wm.port
+  [data_collector_enabled]=data_collector.enabled
+  [data_collector_server]=data_collector.server
+  [data_collector_port]=data_collector.port
+)
+
+pkg_binds_optional=(
+  [chef-server-ctl]="secrets"
+  [database]="port"
+  [elasticsearch]="http-port"
+  [oc_bifrost]="port"
+  [bookshelf]="port"
+)
+
+pkg_version() {
+  cat "$PLAN_CONTEXT/../../../VERSION"
+}
+
+do_before() {
+  do_default_before
+  if [ ! -f "$PLAN_CONTEXT/../../../VERSION" ]; then
+    exit_with "Cannot find VERSION file! You must run \"hab studio enter\" from the chef-server project root."
56 + fi + update_pkg_version +} + +do_download() { + return 0 +} + +do_verify() { + return 0 +} + +do_unpack() { + # Copy everything over to the cache path so we don't write out our compiled + # deps into the working directory, but into the cache directory. + mkdir -p "$HAB_CACHE_SRC_PATH/$pkg_dirname" + cp -R "$PLAN_CONTEXT/../"* "$HAB_CACHE_SRC_PATH/$pkg_dirname" +} + +do_prepare() { + # The `/usr/bin/env` path is hardcoded in jiffy, so we'll add a symlink since fix_interpreter won't work. + if [[ ! -r /usr/bin/env ]]; then + ln -sv "$(pkg_path_for coreutils)/bin/env" /usr/bin/env + _clean_env=true + fi + + # Need this for enterprise_ctl to compile with an old version of rebar + build_line "Setting PATH=$PATH:$HAB_CACHE_SRC_PATH/$pkg_dirname" + export PATH=$PATH:"$HAB_CACHE_SRC_PATH/$pkg_dirname" + + build_line "RELX_OUTPUT_DIR=$pkg_prefix" + export RELX_OUTPUT_DIR=$pkg_prefix + + git config --global http.sslCAInfo \ + "$(pkg_path_for core/cacerts)"/ssl/certs/cacert.pem + + fix_interpreter ./rebar3 core/coreutils bin/env +} + + +do_build() { + _bundler_dir="$(pkg_path_for bundler)" + export REL_VERSION=$pkg_version + export USE_SYSTEM_GECODE=1 + export GEM_HOME="${pkg_path}/vendor/bundle" + export GEM_PATH="${_bundler_dir}:${GEM_HOME}" + export LIBRARY_PATH="$(pkg_path_for core/gecode)/lib" + export LD_LIBRARY_PATH="$(pkg_path_for core/gecode)/lib" + export CPLUS_INCLUDE_PATH="$(pkg_path_for core/gecode)/include" + make omnibus +} + +do_install() { + export HOME="${pkg_prefix}" + cp Gemfile_habitat ${pkg_prefix}/Gemfile + cp Gemfile_habitat.lock ${pkg_prefix}/Gemfile.lock + bundle install --path "${pkg_prefix}/vendor/bundle" && bundle config path ${pkg_prefix}/vendor/bundle + cp -r "_build/default/rel/oc_erchef/"* "${pkg_prefix}" + cp -R "$HAB_CACHE_SRC_PATH/$pkg_dirname/schema" "$pkg_prefix" +} + +do_check() { + make +} + +do_end() { + # Clean up the `env` link, if we set it up. + if [[ -n "$_clean_env" ]]; then + rm -fv /usr/bin/env + fi +} diff --git a/src/openresty-lpeg/.gitignore b/src/openresty-lpeg/.gitignore new file mode 100644 index 0000000000..fbca225379 --- /dev/null +++ b/src/openresty-lpeg/.gitignore @@ -0,0 +1 @@ +results/ diff --git a/src/openresty-lpeg/habitat/default.toml b/src/openresty-lpeg/habitat/default.toml new file mode 100644 index 0000000000..03a05306cd --- /dev/null +++ b/src/openresty-lpeg/habitat/default.toml @@ -0,0 +1,3 @@ +# Use this file to templatize your application's native configuration files. +# See the docs at https://www.habitat.sh/docs/create-packages-configure/. +# You can safely delete this file if you don't need it. 
diff --git a/src/openresty-lpeg/habitat/plan.sh b/src/openresty-lpeg/habitat/plan.sh new file mode 100644 index 0000000000..18481f85d5 --- /dev/null +++ b/src/openresty-lpeg/habitat/plan.sh @@ -0,0 +1,28 @@ +pkg_name=openresty-lpeg +pkg_origin=irvingpop +pkg_version="0.12" +pkg_maintainer="The Habitat Maintainers " +pkg_license=('MIT') +pkg_source="http://www.inf.puc-rio.br/~roberto/lpeg/lpeg-${pkg_version}.tar.gz" +pkg_shasum="efa545144cd219eee823af7624d90f78c2230677ba740b7151c5d0c303778b76" +pkg_deps=(core/openresty) +pkg_build_deps=(core/make core/gcc) +pkg_description="Parsing Expression Grammars For Lua" +pkg_upstream_url="http://www.inf.puc-rio.br/~roberto/lpeg/" +pkg_dirname="lpeg-${pkg_version}" +# pkg_filename="${pkg_name}-${pkg_version}.tar.gz" +# pkg_lib_dirs=(lib) +# pkg_include_dirs=(include) +# pkg_bin_dirs=(bin) +# pkg_pconfig_dirs=(lib/pconfig) +# pkg_svc_group="$pkg_svc_user" + +do_build() { + cd $HAB_CACHE_SRC_PATH/$pkg_dirname + make "LUADIR=$(hab pkg path "core/openresty")/luajit/include/luajit-2.1" +} + +do_install() { + # mkdir $pkg_prefix/lib + install -p -m 0755 lpeg.so $pkg_prefix +} diff --git a/src/openresty-noroot/habitat/default.toml b/src/openresty-noroot/habitat/default.toml new file mode 100644 index 0000000000..d879b113dd --- /dev/null +++ b/src/openresty-noroot/habitat/default.toml @@ -0,0 +1,35 @@ + # # #### # # # # # + ## # # # # ## # # # + # # # # # # # # ## + # # # # ### # # # # ## + # ## # # # # ## # # + # # #### # # # # # + # For help with NGINX Config Tuning, + # refer to: http://nginx.org/en/docs/http/ngx_http_core_module.html + + +#### General Configuration +# worker_processes: Number of NGINX processes. Default = 1 +worker_processes = 4 + +#### Events Context Configuration +[events] +# worker_connections: Connections per Worker Process. Default = 1024 +worker_connections = 1024 + +#### HTTP Context Configuration +[http] +# http.sendfile: Enable (on) or disable (off) Sendfile Support. Default = on +sendfile = "on" + +# http.tcp_nopush: Enable (on) or disable (off) use of TCP_NOPUSH or TCP_CORK socket option. Default = on +tcp_nopush = "on" + +# http.tcp_nodelay: Enable (on) or disable (off) use of the TCP_NODELAY option. Default = on +tcp_nodelay = "on" + +# http.keepalive_timeout: Timeout on client connection keepalive, in seconds. 
Default = 75
+keepalive_timeout = 60
+
+[http.listen]
+port = 80
diff --git a/src/openresty-noroot/habitat/hooks/init b/src/openresty-noroot/habitat/hooks/init
new file mode 100644
index 0000000000..b19374f7b4
--- /dev/null
+++ b/src/openresty-noroot/habitat/hooks/init
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+exec 2>&1
+
+echo "Linking conf dirs"
+
+ln -sf {{pkg.path}}/nginx/conf {{pkg.svc_var_path}}/
+ln -sf {{pkg.path}}/nginx/html {{pkg.svc_var_path}}/
diff --git a/src/openresty-noroot/habitat/hooks/run b/src/openresty-noroot/habitat/hooks/run
new file mode 100644
index 0000000000..beb8d9388a
--- /dev/null
+++ b/src/openresty-noroot/habitat/hooks/run
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+exec 2>&1
+
+exec openresty -g 'daemon off;' -c {{pkg.svc_var_path}}/conf/nginx.conf
diff --git a/src/openresty-noroot/habitat/plan.sh b/src/openresty-noroot/habitat/plan.sh
new file mode 100644
index 0000000000..e7f463a450
--- /dev/null
+++ b/src/openresty-noroot/habitat/plan.sh
@@ -0,0 +1,93 @@
+pkg_name=openresty-noroot
+pkg_origin=chef-server
+pkg_version=1.11.2.5
+pkg_description="Scalable Web Platform by Extending NGINX with Lua"
+pkg_maintainer="The Chef Server Maintainers "
+pkg_license=('BSD-2-Clause')
+pkg_source=https://openresty.org/download/openresty-${pkg_version}.tar.gz
+pkg_dirname=openresty-${pkg_version}
+pkg_filename=openresty-${pkg_version}.tar.gz
+pkg_upstream_url=http://openresty.org/
+pkg_shasum=f8cc203e8c0fcd69676f65506a3417097fc445f57820aa8e92d7888d8ad657b9
+pkg_deps=(core/glibc core/gcc-libs core/libxml2 core/libxslt core/zlib core/bzip2 core/openssl/1.0.2l/20171014213633 core/pcre core/coreutils core/perl core/which)
+pkg_build_deps=(core/gcc core/make)
+pkg_lib_dirs=(lib)
+pkg_bin_dirs=(bin nginx/sbin luajit/bin)
+pkg_include_dirs=(include)
+pkg_svc_user="hab"
+pkg_exports=(
+  [port]=http.listen.port
+)
+pkg_exposes=(port)
+
+lpeg_version="0.12"
+lpeg_source="http://www.inf.puc-rio.br/~roberto/lpeg/lpeg-${lpeg_version}.tar.gz"
+
+do_prepare() {
+  # The `/usr/bin/env` path is hardcoded, so we'll add a symlink.
+  if [[ !
-r /usr/bin/env ]]; then + ln -sv "$(pkg_path_for coreutils)/bin/env" /usr/bin/env + _clean_env=true + fi +} + +do_build() { + ./configure --prefix="$pkg_prefix" \ + --user=hab \ + --group=hab \ + --http-log-path=stdout \ + --error-log-path=stderr \ + --with-ipv6 \ + --with-debug \ + --with-pcre \ + --with-md5-asm \ + --with-pcre-jit \ + --with-sha1-asm \ + --with-file-aio \ + --with-luajit \ + --with-stream=dynamic \ + --with-mail=dynamic \ + --with-http_gunzip_module \ + --with-http_gzip_static_module \ + --with-http_realip_module \ + --with-http_v2_module \ + --with-http_ssl_module \ + --with-http_stub_status_module \ + --with-http_addition_module \ + --with-http_degradation_module \ + --with-http_flv_module \ + --with-http_mp4_module \ + --with-http_secure_link_module \ + --with-http_sub_module \ + --with-http_slice_module \ + --with-cc-opt="$CFLAGS" \ + --with-ld-opt="$LDFLAGS" \ + --without-http_ssi_module \ + --without-mail_smtp_module \ + --without-mail_imap_module \ + --without-mail_pop3_module \ + -j"$(nproc)" + + make -j"$(nproc)" + +} + +do_install() { + make install + fix_interpreter "$pkg_prefix/bin/*" core/coreutils bin/env + + cd $HAB_CACHE_SRC_PATH + wget $lpeg_source + tar -xzf lpeg-${lpeg_version}.tar.gz + cd lpeg-${lpeg_version} + make "LUADIR=$pkg_prefix/luajit/include/luajit-2.1" || attach + install -p -m 0755 lpeg.so $pkg_prefix/luajit/lib/lua/5.1/ || attach +# cp $(hab pkg path "chef-server/openresty-lpeg")/lpeg.so ${pkg_prefix}/luajit/lib/lua/5.1/ || attach +} + +do_end() { + # Clean up the `env` link, if we set it up. + if [[ -n "$_clean_env" ]]; then + rm -fv /usr/bin/env + fi +}
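+
+# Sanity check (suggestion only, not part of the plan): once `build` has
+# produced a package, you can verify that the vendored lpeg module loads,
+# e.g. with the `resty` CLI that openresty ships:
+#   hab pkg exec chef-server/openresty-noroot resty -e 'print(require("lpeg").version())'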