diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 54604ea..1a5f906 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -31,8 +31,14 @@ jobs:
       fail-fast: false

     runs-on: ubuntu-latest
-
+    environment: base
+    env:
+      IBEXA_CLI_TOKEN: ${{ secrets.IBEXA_CLI_TOKEN }}
+      IBEXA_PROJECT: ${{ vars.IBEXA_PROJECT }}
     steps:
+      - name: environment check
+        run: |
+          echo "IBEXA_PROJECT=${IBEXA_PROJECT}"
       - uses: ddev/github-action-add-on-test@v2
         with:
          ddev_version: ${{ matrix.ddev_version }}
diff --git a/README.md b/README.md
index bb3043b..fd472e2 100644
--- a/README.md
+++ b/README.md
@@ -1,93 +1,20 @@
-[![tests](https://github.com/ddev/ddev-addon-template/actions/workflows/tests.yml/badge.svg)](https://github.com/ddev/ddev-addon-template/actions/workflows/tests.yml) ![project is maintained](https://img.shields.io/maintenance/yes/2024.svg)
+[![tests](https://github.com/rfay/ddev-ibexa-cloud/actions/workflows/tests.yml/badge.svg)](https://github.com/rfay/ddev-ibexa-cloud/actions/workflows/tests.yml) ![project is maintained](https://img.shields.io/maintenance/yes/2025.svg)
-# ddev-addon-template
+[Ibexa Cloud](https://www.ibexa.co/products/ibexa-cloud) has its own CLI, which is used instead of the `platform` CLI.
-* [What is ddev-addon-template?](#what-is-ddev-addon-template)
-* [Components of the repository](#components-of-the-repository)
-* [Getting started](#getting-started)
-* [How to debug in Github Actions](#how-to-debug-tests-github-actions)
+This add-on provides Ibexa Cloud integration for DDEV.
-## What is ddev-addon-template?
+1. Configure your Ibexa project for DDEV if you haven't already; see the [DDEV Ibexa Quickstart](https://ddev.readthedocs.io/en/stable/users/quickstart/#ibexa-dxp).
+2. `ddev get rfay/ddev-ibexa-cloud` (or, with DDEV v1.23.5+, `ddev add-on get rfay/ddev-ibexa-cloud`)
+3. Configure your `IBEXA_CLI_TOKEN` globally: `ddev config global --web-environment-add=IBEXA_CLI_TOKEN=nf4amudfn23biyourtoken`
+4. Configure the `IBEXA_PROJECT`, `IBEXA_ENVIRONMENT`, and (optional) `IBEXA_APP` environment variables, for example `ddev config --web-environment-add=IBEXA_PROJECT=nf4amudfn23biyourproject,IBEXA_ENVIRONMENT=main,IBEXA_APP=app`
+5. `ddev restart`
+6. `ddev pull ibexa-cloud`
-This repository is a template for providing [DDEV](https://ddev.readthedocs.io) add-ons and services.
+## Running Automated Tests Locally
-In DDEV addons can be installed from the command line using the `ddev get` command, for example, `ddev get ddev/ddev-redis` or `ddev get ddev/ddev-solr`.
+* `IBEXA_CLI_TOKEN`, `IBEXA_PROJECT`, and `IBEXA_ENVIRONMENT` must be set in the environment
+* `brew tap kaos/shell && brew install bats-assert bats-file`
-This repository is a quick way to get started. You can create a new repo from this one by clicking the template button in the top right corner of the page.
-![template button](images/template-button.png)
-
-## Components of the repository
-
-* The fundamental contents of the add-on service or other component. For example, in this template there is a [docker-compose.addon-template.yaml](docker-compose.addon-template.yaml) file.
-* An [install.yaml](install.yaml) file that describes how to install the service or other component.
-* A test suite in [test.bats](tests/test.bats) that makes sure the service continues to work as expected.
-* [Github actions setup](.github/workflows/tests.yml) so that the tests run automatically when you push to the repository.
-
-## Getting started
-
-1. Choose a good descriptive name for your add-on.
It should probably start with "ddev-" and include the basic service or functionality. If it's particular to a specific CMS, perhaps `ddev--servicename`. -2. Create the new template repository by using the template button. -3. Globally replace "addon-template" with the name of your add-on. -4. Add the files that need to be added to a DDEV project to the repository. For example, you might replace `docker-compose.addon-template.yaml` with the `docker-compose.*.yaml` for your recipe. -5. Update the `install.yaml` to give the necessary instructions for installing the add-on: - - * The fundamental line is the `project_files` directive, a list of files to be copied from this repo into the project `.ddev` directory. - * You can optionally add files to the `global_files` directive as well, which will cause files to be placed in the global `.ddev` directory, `~/.ddev`. - * Finally, `pre_install_commands` and `post_install_commands` are supported. These can use the host-side environment variables documented [in DDEV docs](https://ddev.readthedocs.io/en/stable/users/extend/custom-commands/#environment-variables-provided). - -6. Update `tests/test.bats` to provide a reasonable test for your repository. Tests are triggered either by manually executing `bats ./tests/test.bats`, automatically on every push to the repository, or periodically each night. Please make sure to attend to test failures when they happen. Others will be depending on you. Bats is a simple testing framework that just uses Bash. To run a Bats test locally, you have to [install bats-core](https://bats-core.readthedocs.io/en/stable/installation.html) first. Then you download your add-on, and finally run `bats ./tests/test.bats` within the root of the uncompressed directory. To learn more about Bats see the [documentation](https://bats-core.readthedocs.io/en/stable/). -7. When everything is working, including the tests, you can push the repository to GitHub. -8. Create a [release](https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository) on GitHub. -9. Test manually with `ddev get `. -10. You can test PRs with `ddev get https://github.com///tarball/` -11. Update the `README.md` to describe the add-on, how to use it, and how to contribute. If there are any manual actions that have to be taken, please explain them. If it requires special configuration of the using project, please explain how to do those. Examples in [ddev/ddev-solr](https://github.com/ddev/ddev-solr), [ddev/ddev-memcached](https://github.com/ddev/ddev-memcached), and (advanced) [ddev-platformsh](https://github.com/ddev/ddev-platformsh). -12. Update the `README.md` header in Title Case format, for example, use `# DDEV Redis`, not `# ddev-redis`. -13. Add a good short description to your repo, and add the [topic](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/classifying-your-repository-with-topics) "ddev-get". It will immediately be added to the list provided by `ddev get --list --all`. -14. When it has matured you will hopefully want to have it become an "official" maintained add-on. Open an issue in the [DDEV queue](https://github.com/ddev/ddev/issues) for that. - -Add-ons were covered in [DDEV Add-ons: Creating, maintaining, testing](https://www.youtube.com/watch?v=TmXqQe48iqE) (part of the [DDEV Contributor Live Training](https://ddev.com/blog/contributor-training)). 
-
-Note that more advanced techniques are discussed in [Advanced Add-On Techniques](https://ddev.com/blog/advanced-add-on-contributor-training/) and [DDEV docs](https://ddev.readthedocs.io/en/stable/users/extend/additional-services/).
-
-## How to debug tests (Github Actions)
-
-1. You need an SSH-key registered with GitHub. You either pick the key you have already used with `github.com` or you create a dedicated new one with `ssh-keygen -t ed25519 -a 64 -f tmate_ed25519 -C "$(date +'%d-%m-%Y')"` and add it at `https://github.com/settings/keys`.
-
-2. Add the following snippet to `~/.ssh/config`:
-
-```
-Host *.tmate.io
-    User git
-    AddKeysToAgent yes
-    UseKeychain yes
-    PreferredAuthentications publickey
-    IdentitiesOnly yes
-    IdentityFile ~/.ssh/tmate_ed25519
-```
-3. Go to `https://github.com///actions/workflows/tests.yml`.
-
-4. Click the `Run workflow` button and you will have the option to select the branch to run the workflow from and activate `tmate` by checking the `Debug with tmate` checkbox for this run.
-
-![tmate](images/gh-tmate.jpg)
-
-5. After the `workflow_dispatch` event was triggered, click the `All workflows` link in the sidebar and then click the `tests` action in progress workflow.
-
-7. Pick one of the jobs in progress in the sidebar.
-
-8. Wait until the current task list reaches the `tmate debugging session` section and the output shows something like:
-
-```
-106 SSH: ssh PRbaS7SLVxbXImhjUqydQBgDL@nyc1.tmate.io
-107 or: ssh -i PRbaS7SLVxbXImhjUqydQBgDL@nyc1.tmate.io
-108 SSH: ssh PRbaS7SLVxbXImhjUqydQBgDL@nyc1.tmate.io
-109 or: ssh -i PRbaS7SLVxbXImhjUqydQBgDL@nyc1.tmate.io
-```
-
-9. Copy and execute the first option `ssh PRbaS7SLVxbXImhjUqydQBgDL@nyc1.tmate.io` in the terminal and continue by pressing either q or Ctrl + c.
-
-10. Start the Bats test with `bats ./tests/test.bats`.
-
-For a more detailed documentation about `tmate` see [Debug your GitHub Actions by using tmate](https://mxschmitt.github.io/action-tmate/).
-
-**Contributed and maintained by `@CONTRIBUTOR`**
+**Contributed and maintained by @rfay**
diff --git a/commands/web/ibexa_cloud b/commands/web/ibexa_cloud
new file mode 100755
index 0000000..6265f53
--- /dev/null
+++ b/commands/web/ibexa_cloud
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+#ddev-generated
+## Description: Run the ibexa_cloud CLI inside the web container
+## Usage: ibexa_cloud [flags] [args]
+## Example: "ibexa_cloud user:list" or "ibexa_cloud ssh" or "ibexa_cloud service:list"
+## ExecRaw: true
+
+ibexa_cloud "$@"
diff --git a/config.ibexa-cloud.yaml b/config.ibexa-cloud.yaml
new file mode 100644
index 0000000..2b8a8e2
--- /dev/null
+++ b/config.ibexa-cloud.yaml
@@ -0,0 +1,3 @@
+#ddev-generated
+web_environment:
+  - PLATFORMSH_CLI_HOME=/usr/local/bin
\ No newline at end of file
diff --git a/docker-compose.addon-template.yaml b/docker-compose.addon-template.yaml
deleted file mode 100644
index 4644037..0000000
--- a/docker-compose.addon-template.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-#ddev-generated
-# Simple template to demonstrate addon-template
-services:
-  addon-template:
-    container_name: ddev-${DDEV_SITENAME}-addon-template
-    image: busybox:stable
-    command: tail -f /dev/null
-    restart: "no"
-    # These labels ensure this service is discoverable by ddev.
- labels: - com.ddev.site-name: ${DDEV_SITENAME} - com.ddev.approot: $DDEV_APPROOT - - volumes: - - ".:/mnt/ddev_config" - - "ddev-global-cache:/mnt/ddev-global-cache" diff --git a/install.yaml b/install.yaml index d70f351..fca0c8a 100644 --- a/install.yaml +++ b/install.yaml @@ -1,112 +1,18 @@ -# Details about the install.yaml file are at https://ddev.readthedocs.io/en/stable/users/extend/additional-services/#sections-and-features-of-ddev-get-add-on-installyaml -name: addon-template - -# pre_install_actions - list of actions to run before installing the addon. -# Examples would be removing an extraneous docker volume, -# or doing a sanity check for requirements. -# DDEV environment variables can be interpolated into these actions -# pre_install_actions are executed in the context of the target project's root directory. +name: ibexa-cloud pre_install_actions: - # - | - # #ddev-description:Check architecture type for incompatible arm64 type - # if [ "$(uname -m)" = "arm64" -o "$(uname -m)" = "aarch64" ]; then - # echo "This package does not work on arm64 (Apple Silicon) machines"; - # exit 1; - # fi - - # - "docker volume rm ddev-${DDEV_PROJECT}_solr 2>/dev/null || true" - - # You can also check for client DDEV version with ddev_version_constraint (see below). - # - | - # #ddev-description:Checking DDEV version - # if ! ( ddev debug capabilities 2>/dev/null | grep corepack >/dev/null 2>&1 ) ; then - # echo "This add-on requires DDEV v1.23+ or higher, please upgrade." && exit 2 - # fi - - # - 'echo "what is your platform.sh token" && read x' - - # This item shows templating using DDEV environment variables. - # - | - # #ddev-description:Touch a file to create it - # touch ${DDEV_APPROOT}/.ddev/somefile.${DDEV_PROJECT_TYPE}.${DDEV_DOCROOT}.txt - - # This item shows complex go templating possibilities based on yaml_read_files - # - | - # #ddev-description:Create a config.platformsh.yaml - # cat <${DDEV_APPROOT}/.ddev/config.platformsh.yaml - # php_version: {{ trimPrefix "php:" .platformapp.type }} - # database: - # type: {{ regexReplaceAll ":.*$" .services.db.type "" }} - # version: {{ regexReplaceAll "^.*:" .services.db.type "" }} - # docroot: {{ dig "web" "locations" "/" "root" "notfound" .platformapp }} - # {{- if eq .platformapp.build.flavor "composer" }} - # hooks: - # post-start: - # - composer: install - # {{- if .platformapp.hooks.deploy }} - # - exec: "{{ trimAll "\n" .platformapp.hooks.deploy | splitList "\n" | join ` && ` }}" - # {{- end }} - # {{- end }} - - # EOF - -# list of files and directories listed that are copied into project .ddev directory -# Each file should contain #ddev-generated so it can be replaced by a later `ddev get` -# if it hasn't been modified by the user. 
-# DDEV environment variables can be interpolated into these filenames -# If you use directories, they must be directories that are managed -# by this add-on, or removal could remove things that are not owned by it project_files: - - docker-compose.addon-template.yaml - # - some-directory/file1.txt - # - some-directory/file2.txt - # - extra_files_dir_created_by_this_template/ - # - somefile.sh + - commands/web/ibexa_cloud + - config.ibexa-cloud.yaml + - providers/ibexa-cloud.yaml + - web-build/Dockerfile.ibexa-cloud -# List of files and directories that are copied into the global .ddev directory -# DDEV environment variables can be interpolated into these filenames global_files: - # - commands/web/add-on-command - # - homeadditions/some-file.txt - -# Version constraint for DDEV that will be validated against the running DDEV executable -# and prevent add-on from being installed if it doesn't validate. -# See https://github.com/Masterminds/semver#checking-version-constraints for constraint rules. -# Available with DDEV v1.23.4+, and works only for DDEV v1.23.4+ binaries -# example: ddev_version_constraint: '>= v1.23.4' -ddev_version_constraint: '' - -# List of add-on names that this add-on depends on dependencies: - # - redis -# DDEV environment variables can be interpolated into these actions. -# post_install_actions are executed in the context of the target project's .ddev directory. post_install_actions: - # - chmod +x ~/.ddev/commands/web/somecommand - # - touch ${DDEV_APPROOT}/somefile.${GOOS}.${DDEV_WEBSERVER} - # - perl -pi -e 's/oldstring/newstring/g' ${DDEV_APPROOT}/.ddev/docker-compose.addon-template.yaml - -# Shell actions that can be done during removal of the add-on. -# Files listed in project_files section will be automatically removed here if they contain #ddev-generated line. -# removal_actions are executed in the context of the target project's .ddev directory. removal_actions: - # - rm ~/.ddev/commands/web/somecommand - # - | - # if [ -f ${DDEV_APPROOT}/.ddev/docker-compose.addon-template_extras.yaml ]; then - # if grep -q '#ddev-generated' ${DDEV_APPROOT}/.ddev/docker-compose.addon-template_extras.yaml; then - # rm -f ${DDEV_APPROOT}/.ddev/docker-compose.addon-template_extras.yaml - # else - # echo "Unwilling to remove '${DDEV_APPROOT}/.ddev/docker-compose.addon-template_extras.yaml' because it does not have #ddev-generated in it; you can manually delete it if it is safe to delete." - # fi - # fi -# Advanced usage - YAML files can be read in and then used as go template actions -# in pre_install_actions and post_install_actions -# See example in -# https://github.com/ddev/ddev/blob/master/cmd/ddev/cmd/testdata/TestCmdAddonComplex/recipe/install.yaml yaml_read_files: - # someyaml: someyaml.yaml - # otheryaml: someotheryaml.yaml + diff --git a/providers/ibexa-cloud.yaml b/providers/ibexa-cloud.yaml new file mode 100644 index 0000000..732d842 --- /dev/null +++ b/providers/ibexa-cloud.yaml @@ -0,0 +1,107 @@ +#ddev-generated +# Ibexa Cloud provider configuration. This works out of the box, but can be edited to add +# your own preferences. If you edit it, remove the `ddev-generated` line from the top so +# that it won't be overwritten. + +# This provider makes `ddev pull ibexa-cloud` work. + +# To use this configuration, + +# 1. Check out the site from Ibexa Cloud and then configure it with `ddev config`. You'll want to use `ddev start` and make sure the basic functionality is working. +# 2. Obtain and configure an API token. +# a. 
Log in to the Ibexa Cloud Dashboard and go to Account->API Tokens to create an API token for ddev to use.
+#    b. Add the API token to the `web_environment` section in your global ddev configuration at ~/.ddev/global_config.yaml:
+#    ```yaml
+#    web_environment:
+#        - IBEXA_CLI_TOKEN=abcdeyourtoken
+#    ```
+# 3. Add IBEXA_PROJECT, IBEXA_ENVIRONMENT, and optionally IBEXA_APP (only if your environment contains more than one app) variables to your project `.ddev/config.yaml` or a `.ddev/config.ibexa-cloud.yaml`:
+#    ```yaml
+#    web_environment:
+#        - IBEXA_PROJECT=nf4amudfn23biyourproject
+#        - IBEXA_ENVIRONMENT=main
+#        - IBEXA_APP=app
+#    ```
+# 4. `ddev restart`
+# 5. Run `ddev pull ibexa-cloud`. After you agree to the prompt, the current upstream database and files will be downloaded.
+# 6. Optionally use `ddev push ibexa-cloud` to push local files and database to Ibexa Cloud. Note that `ddev push` is a command that can potentially damage your production site, so it is not recommended.
+
+# If you have more than one database on your Ibexa Cloud project,
+# you will likely want to choose which one you want to use
+# as the primary database ('db').
+# Do this by setting PRIMARY_RELATIONSHIP, for example, `ddev config --web-environment-add=PRIMARY_RELATIONSHIP=main`,
+# or run `ddev pull ibexa-cloud` with the environment variable set, for example
+# `ddev pull ibexa-cloud -y --environment=PRIMARY_RELATIONSHIP=main`.
+# If you need to change this `ibexa-cloud.yaml` recipe, you can change it to suit your needs, but remember to remove the "ddev-generated" line from the top.
+
+# Debugging: Use `ddev ibexa_cloud` to see what Ibexa Cloud knows about
+# your configuration and whether it's working correctly.
+
+auth_command:
+  command: |
+    set -e -o pipefail
+    export IBEXA_CLI_NO_INTERACTION=1
+    if [ -z "${IBEXA_CLI_TOKEN:-}" ]; then echo "Please make sure you have set IBEXA_CLI_TOKEN." && exit 1; fi
+    if [ -z "${IBEXA_PROJECT:-}" ]; then echo "Please make sure you have set IBEXA_PROJECT." && exit 1; fi
+    if [ -z "${IBEXA_ENVIRONMENT:-}" ]; then echo "Please make sure you have set IBEXA_ENVIRONMENT." && exit 1; fi
+    ibexa_cloud environment:activate -p ${IBEXA_PROJECT} ${IBEXA_ENVIRONMENT} 2>/dev/null || true
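+
+# For reference, the relationship data that db_pull_command below filters with `yq`
+# looks roughly like this (illustrative output of `ibexa_cloud relationships`;
+# actual relationship names, services, and versions vary per project):
+#
+#   database:
+#       - service: mysqldb
+#         type: mariadb:10.4
+#         path: main
+#   rediscache:
+#       - service: rediscache
+#         type: redis:5.0
+#
+# Only the mariadb/mysql/postgresql relationships are kept; each one's `path`
+# becomes the local database name, and a single database is imported as 'db'.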
+db_pull_command:
+  command: |
+    # set -x   # You can enable bash debugging output by uncommenting
+    set -eu -o pipefail
+    export IBEXA_CLI_NO_INTERACTION=1
+    # /tmp/db_relationships.yaml is the full yaml output of the database relationships
+    db_relationships_file=/tmp/db_relationships.yaml
+    RELATIONSHIPS="" ibexa_cloud relationships -y -p "${IBEXA_PROJECT}" -e "${IBEXA_ENVIRONMENT}" ${IBEXA_APP:+"--app=${IBEXA_APP}"} | yq 'with_entries(select(.[][].type == "mariadb:*" or .[][].type == "*mysql:*" or .[][].type == "postgresql:*")) ' >${db_relationships_file}
+    db_relationships=($(yq ' keys | .[] ' ${db_relationships_file}))
+    db_names=($(yq '.[][].path' ${db_relationships_file}))
+    db_count=${#db_relationships[@]}
+    # echo "db_relationships=${db_relationships} sizeof db_relationships=${#db_relationships[@]} db_names=${db_names} db_count=${db_count} PRIMARY_RELATIONSHIP=${PRIMARY_RELATIONSHIP}"
+    # If we have only one database, import it into the local database named 'db'
+    if [ ${#db_names[@]} -eq 1 ]; then db_names[0]="db"; fi
+
+    for (( i=0; i<${#db_relationships[@]}; i++ )); do
+      db_name=${db_names[$i]}
+      rel=${db_relationships[$i]}
+      # if PRIMARY_RELATIONSHIP is set, then when doing that one, import it into the local database 'db'
+      if [ "${rel}" = "${PRIMARY_RELATIONSHIP:-notset}" ] ; then
+        echo "PRIMARY_RELATIONSHIP=${PRIMARY_RELATIONSHIP:-} so using it as database 'db' instead of the upstream '${db_name}'"
+        db_name="db"
+      fi
+
+      ibexa_cloud db:dump --yes ${IBEXA_APP:+"--app=${IBEXA_APP}"} --relationship=${rel} --gzip --file=/var/www/html/.ddev/.downloads/${db_name}.sql.gz --project="${IBEXA_PROJECT:-setme}" --environment="${IBEXA_ENVIRONMENT:-setme}"
+    done
+    echo "Downloaded db dumps for databases '${db_names[@]}'"
+
+files_import_command:
+  command: |
+    #set -x   # You can enable bash debugging output by uncommenting
+    set -eu -o pipefail
+    export IBEXA_CLI_NO_INTERACTION=1
+    # mount:download --all downloads every mount of the app
+    ibexa_cloud mount:download --all --yes --quiet --project="${IBEXA_PROJECT}" --environment="${IBEXA_ENVIRONMENT}" ${IBEXA_APP:+"--app=${IBEXA_APP}"} --target=/var/www/html
+
+
+# push is a dangerous command. If not absolutely needed it's better to delete these lines.
+db_push_command: + command: | + # set -x # You can enable bash debugging output by uncommenting + set -eu -o pipefail + export IBEXA_CLI_NO_INTERACTION=1 + pushd /var/www/html/.ddev/.downloads >/dev/null; + if [ "${PRIMARY_RELATIONSHIP:-}" != "" ] ; then + rel="--relationship ${PRIMARY_RELATIONSHIP}" + fi + gzip -dc db.sql.gz | ibexa_cloud db:sql --project="${IBEXA_PROJECT}" ${rel:-} --environment="${IBEXA_ENVIRONMENT}" ${IBEXA_APP:+"--app=${IBEXA_APP}"} + +# push is a dangerous command and should not be used against a production target +files_push_command: + command: | + # set -x # You can enable bash debugging output by uncommenting + set -eu -o pipefail + export IBEXA_CLI_NO_INTERACTION=1 + mounts=$(ibexa_cloud mount:list --project=${IBEXA_PROJECT} --environment=${IBEXA_ENVIRONMENT} --paths) + for item in ${mounts}; do + ibexa_cloud mount:upload --yes --project="${IBEXA_PROJECT}" --environment="${IBEXA_ENVIRONMENT}" ${IBEXA_APP:+"--app=${IBEXA_APP}"} --source="${item}" --mount=${item} + done \ No newline at end of file diff --git a/tests/test.bats b/tests/test.bats index 2648a60..e05ea94 100644 --- a/tests/test.bats +++ b/tests/test.bats @@ -1,20 +1,60 @@ setup() { - set -eu -o pipefail + # set -u does not work with bats-assert + set -e -o pipefail + TEST_BREW_PREFIX="$(brew --prefix)" + load "${TEST_BREW_PREFIX}/lib/bats-support/load.bash" + load "${TEST_BREW_PREFIX}/lib/bats-assert/load.bash" + load "${TEST_BREW_PREFIX}/lib/bats-file/load.bash" + + export DIR="$( cd "$( dirname "$BATS_TEST_FILENAME" )" >/dev/null 2>&1 && pwd )/.." - export TESTDIR=~/tmp/test-addon-template + export TESTDIR=~/tmp/test-ibexa-cloud mkdir -p $TESTDIR - export PROJNAME=test-addon-template + export PROJNAME=test-ibexa-cloud export DDEV_NONINTERACTIVE=true ddev delete -Oy ${PROJNAME} >/dev/null 2>&1 || true cd "${TESTDIR}" ddev config --project-name=${PROJNAME} - ddev start -y >/dev/null + cp -r ${DIR}/tests/testdata/.platform.app.yaml ${DIR}/tests/testdata/.platform ${TESTDIR} +} + +pull_health_checks() { + # set -x + rm -rf ${TESTDIR}/var/encore/* + run ddev pull ibexa-cloud -y + assert_success + run ddev mysql -e 'SELECT COUNT(*) from ezpage_zones;' + assert_success + assert_line --index 1 "13" + ddev mutagen sync + assert_file_exist "${TESTDIR}/var/encore/ibexa.richtext.config.manager.js" } +push_health_checks() { + # set -x + # Add a junk value into local database so we can test it arrives in push environment + ddev mysql -e "INSERT INTO ezpage_zones VALUES(18, 'junk');" + # make sure it doesn't already exist upstream + ddev ibexa_cloud db:sql -p ${IBEXA_PROJECT} -e push -- 'DELETE from ezpage_zones;' + run ddev ibexa_cloud db:sql -p ${IBEXA_PROJECT} -e push -- 'SELECT COUNT(*) FROM ezpage_zones WHERE id=18;' + assert_line --index 1 --regexp "^ *0 *" + + # Add a junk file into local mount so we can test it arrives in push + run ddev ibexa_cloud ssh -p ${IBEXA_PROJECT} -e push -- rm -f var/encore/junk.txt + assert_success + # Verify that it doesn't exist to start with + run ddev ibexa_cloud ssh -p ${IBEXA_PROJECT} -e push -- ls var/encore/junk.txt + assert_failure + touch ${TESTDIR}/var/encore/junk.txt + ddev mutagen sync -health_checks() { - # Do something useful here that verifies the add-on - # ddev exec "curl -s elasticsearch:9200" | grep "${PROJNAME}-elasticsearch" - ddev exec "curl -s https://localhost:443/" + run ddev push ibexa-cloud --environment=IBEXA_ENVIRONMENT=push -y + assert_success + # Verify that our new record now exists + run ddev ibexa_cloud db:sql -p ${IBEXA_PROJECT} -e push -- 
'SELECT name FROM ezpage_zones WHERE id=18;' + assert_output --partial junk + # Verify the new file exists + run ddev ibexa_cloud ssh -p ${IBEXA_PROJECT} -e push -- ls var/encore/junk.txt + assert_success } teardown() { @@ -25,20 +65,29 @@ teardown() { } @test "install from directory" { - set -eu -o pipefail + # bats-assert doesn't work with set -u + set -e -o pipefail cd ${TESTDIR} echo "# ddev get ${DIR} with project ${PROJNAME} in ${TESTDIR} ($(pwd))" >&3 ddev get ${DIR} - ddev restart - health_checks + ddev config --web-environment=IBEXA_CLI_TOKEN=${IBEXA_CLI_TOKEN},IBEXA_PROJECT=${IBEXA_PROJECT},IBEXA_ENVIRONMENT=pull + ddev restart >/dev/null + echo "# pull health checks" >&3 + pull_health_checks + echo "# push health checks" >&3 + push_health_checks } # bats test_tags=release @test "install from release" { set -eu -o pipefail cd ${TESTDIR} || ( printf "unable to cd to ${TESTDIR}\n" && exit 1 ) - echo "# ddev get ddev/ddev-addon-template with project ${PROJNAME} in ${TESTDIR} ($(pwd))" >&3 - ddev get ddev/ddev-addon-template + echo "# ddev get rfay/ddev-ibexa-cloud with project ${PROJNAME} in ${TESTDIR} ($(pwd))" >&3 + ddev get rfay/ddev-ibexa-cloud + ddev config --web-environment=IBEXA_CLI_TOKEN=${IBEXA_CLI_TOKEN},IBEXA_PROJECT=${IBEXA_PROJECT:-},IBEXA_ENVIRONMENT=pull ddev restart >/dev/null - health_checks + echo "# pull health checks" >&3 + pull_health_checks + echo "# push health checks" >&3 + push_health_checks } diff --git a/tests/testdata/.platform.app.yaml b/tests/testdata/.platform.app.yaml new file mode 100644 index 0000000..a4f2e55 --- /dev/null +++ b/tests/testdata/.platform.app.yaml @@ -0,0 +1,250 @@ +# This file describes an application. You can have multiple applications +# in the same project. + +# Please see https://doc.ibexa.co/en/latest/getting_started/install_on_ibexa_cloud/ for Ibexa DXP specific getting started instructions. +# Full documentation: https://docs.platform.sh +# Ibexa DXP requirements: https://doc.ibexa.co/en/latest/getting_started/requirements/#ibexa-cloud-requirements-and-setup + +# The name of this app. Must be unique within a project. +name: app + +dependencies: + nodejs: + yarn: "*" + n: "*" + php: + composer/composer: '^2.0' + +# The type of the application to build. +type: php:8.3 + +build: + # "none" means we're running composer manually, see build hook + # We currently need to do this to install newer version of Node.js + flavor: "none" + +# The relationships of the application with services or other applications. +# The left-hand side is the name of the relationship as it will be exposed +# to the application in the PLATFORM_RELATIONSHIPS variable. The right-hand +# side is in the form `:`. 
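+# For example, `database: 'mysqldb:user'` below exposes the `user` endpoint of the
+# `mysqldb` service defined in .platform/services.yaml to this app under the
+# relationship name `database`.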
+relationships:
+    database: 'mysqldb:user'
+    # Uncomment if you want to store dfs tables in a separate database:
+    #dfs_database: 'mysqldb:dfs'
+    rediscache: 'rediscache:redis'
+    # [Recommended] To have an isolated and persisted Redis instance for sessions, uncomment
+    # this relationship and the corresponding service in .platform/services.yaml
+    #redissession: 'redissession:redis'
+    # If you wish to use solr, uncomment this relationship and the corresponding service in .platform/services.yaml
+    #solr: 'solrsearch:collection1'
+    # If you wish to use elasticsearch, uncomment this relationship and the corresponding service in .platform/services.yaml
+    #elasticsearch: 'elasticsearch:elasticsearch'
+
+variables:
+    php:
+        # Example of setting php.ini config
+        #"display_errors": "On"
+        memory_limit: 512M
+        # The default OPcache configuration is not suited for Symfony applications
+        opcache.memory_consumption: 256
+        opcache.max_accelerated_files: 20000
+        # We recommend enabling the following opcache.validate_timestamps setting in production, but then opcache_reset() must be called every time you clear the symfony cache.
+        #opcache.validate_timestamps: 0
+
+        # Applications that open many PHP files, such as Symfony projects, should use at least these values
+        realpath_cache_size: 4096K
+        realpath_cache_ttl: 600
+    env:
+        # Location for N to store node versions
+        N_PREFIX: /app/.global
+        # We disable Symfony Proxy (CacheKernel), since we use Varnish instead
+        APP_HTTP_CACHE: 0
+        # Warning: Only for Varnish on Platform.sh to work around the missing IP. Disable if you use Fastly or Symfony Proxy where this would be a security issue!
+        TRUSTED_PROXIES: "REMOTE_ADDR"
+        # Change this if you use a different env than "prod".
+        # If you change to "dev", remove "--no-dev" from the `composer install` command.
+        APP_ENV: prod
+        # Uncomment if you want to use DFS clustering:
+        # NOTE: Recommended on PE Dedicated cluster setup. Required if using Legacy Bridge on PE dedicated cluster setup.
+        #PLATFORMSH_DFS_NFS_PATH: 'dfsdata'
+
+# The configuration of the app when it is exposed to the web.
+web:
+    locations:
+        "/":
+            # The public directory of the app, relative to its root.
+            root: "public"
+            # The front-controller script to send non-static requests to.
+            passthru: "/index.php"
+            # The number of seconds whitelisted (static) content should be cached
+            expires: 600
+            # Deny by default, allow in rules below as necessary
+            allow: false
+            rules:
+                # This appears redundant, but looks can be deceiving.
+                # Disable .php(3) and other executable extensions in the var directory
+                '^/var/.*(?i)\.(php3?|phar|phtml|sh|exe|pl|bin)$':
+                    allow: false
+                # Serve storage/images|images-versioned directly
+                '^/var/([^/]+/)?storage/images(-versioned)?/.*':
+                    allow: true
+                '^/favicon\.ico':
+                    allow: true
+                '^/robots\.txt':
+                    allow: true
+                '^/bundles/':
+                    allow: true
+                '^/assets/':
+                    allow: true
+                '^/build/':
+                    allow: true
+                # Comment this if in "dev" mode
+                '^/(css|js|fonts?)/.*\.(css|js|otf|eot|ttf|svg|woff)':
+                    allow: true
+
+# The size of the persistent disk of the application (in MB).
+disk: 3072
+
+# The mounts that will be performed when the package is deployed.
+mounts:
+    # PE Cluster Note: By default all mounts are shared, so if moving to a PE dedicated cluster you should ask platform.sh
+    # Support to make sure at least cache + logs are local, while you can let public/var be shared if you prefer that over DFS.
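+    # Each mount maps a writable path inside the app to persistent storage; for
+    # example, the 'var/cache' entry below persists Symfony's cache directory
+    # under the source path 'cache'.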
+ 'var/cache': + source: local + source_path: cache + 'var/log': + source: local + source_path: log + # [PE Cluster] For cluster it's recommended to rather use a performant shared session storage like Redis/Memcached. + 'var/sessions': + source: local + source_path: sessions + 'var/encore': + source: local + source_path: encore + 'public/var': + source: local + source_path: var + 'src/Migrations/Ibexa': + source: local + source_path: migrations + 'config/graphql/types/ibexa': + source: local + source_path: graphql_types +# Uncomment if you need to use Kaliop Migrations on your setup and not able to get it to write to "var" dir. +# 'src/AppBundle/MigrationVersions/References': +# source: local +# source_path: MigrationVersionsReferences +# Uncomment if you want to use DFS clustering, required if using Legacy Bridge on PE dedicated cluster setup. +# 'dfsdata': +# # Platform.sh Staff: This MUST be shared on cluster, all others SHOULD be local for performance reasons +# source: local +# source_path: dfsdata + +# The hooks that will be performed when the package is deployed. +hooks: + # Build hook, done before connected to services, disk is still writable here + build: | + set -e + + # Install the version specified in the .nvmrc file + # but only if N command exists + hash n && n auto + + # Reset the location hash to recognize the newly installed version + hash -r + + if [ -z "$COMPOSER_AUTH" ]; then + echo "TIP: If you need to authenticate against Github/Gitlab/updates.ibexa.co, use COMPOSER_AUTH env variable" + echo "See: https://docs.platform.sh/guides/general/composer-auth.html#set-the-envcomposer_auth-project-variable" + fi + + composer install --no-dev --prefer-dist --no-progress --no-interaction --optimize-autoloader + + # Deploy hook, access to services & done once (per cluster, not per node), only mounts are writable at this point + # Note: Http traffic is paused while this is running, so for prod code this should finish as fast as possible, < 30s + deploy: | + set -e + + if [ ! -f public/var/.platform.installed ]; then + # Configure ElasticSearch mappings + ##php bin/console ibexa:elasticsearch:put-index-template + + # To workaround issues with p.sh Varnish we clear container cache & temporary set Symfony Proxy + export SKIP_HTTPCACHE_PURGE="1" + rm -Rf var/cache/$APP_ENV/* + php bin/console cache:pool:clear cache.redis + sh bin/platformsh_prestart_cacheclear.sh + SKIP_HTTPCACHE_PURGE="1" php -d memory_limit=-1 bin/console ibexa:install + unset SKIP_HTTPCACHE_PURGE + php bin/console ibexa:graphql:generate-schema + + touch public/var/.platform.installed + fi + + # Now that mounts are available, clear cache on mount. + # Note: Skip on PE Cluster setup using e.g. "if [$PLATFORM_BRANCH" != 'production']; then" & get p.sh to enable this on internal per node "pre_start" hook + sh bin/platformsh_prestart_cacheclear.sh + + # If you also need to clear Redis cache on every deploy, you can either use this command or redis-cli + # Normally this should only be needed if cached data structures changes (upgrades), or you change data via sql (e.g. 
restore backup)
+        ##php bin/console cache:pool:clear cache.redis
+
+        # Example of additional deploy hooks if you use doctrine and/or the kaliop migration bundle
+        ##php bin/console doctrine:migrations:migrate --no-interaction --allow-no-migration
+        ##php bin/console kaliop:migration:migrate --no-interaction --no-debug
+
+        # When using Solr / ElasticSearch, there are two cases where you'll need to rebuild indexes:
+        # - When Solr / ElasticSearch / search configuration changes
+        # - On database import/restore
+        # So in development it might be convenient to rebuild indexes, slowing down deploy time
+        ##php bin/console ibexa:reindex --processes=auto
+
+        # When using Varnish/Fastly, HttpCache is not cleared when clearing the Symfony cache folder above; you need to
+        # clear the cache yourself when, for instance, templates or config affecting responses change, for instance with:
+        ##bin/console fos:httpcache:invalidate:tag ez-all
+        # Depending on your VCL, by default this would trigger a soft purge (expiry) and allow a grace period. However,
+        # if your change only affected a certain subset of content, ideally you should only clear specific tags:
+        ##bin/console fos:httpcache:invalidate:tag l44 c33 ct2 ...
+
+        # It is possible to manipulate your Fastly VCL configuration directly from the command line using the Fastly CLI.
+        # Official documentation on how to install: https://developer.fastly.com/learning/tools/cli#installing
+        # Example VCL snippet upload using the vcl_recv hook (remember the FASTLY_SERVICE_ID and FASTLY_KEY environment variables):
+        ##fastly vcl custom create --name="Ibexa VCL" --main --version=latest --autoclone --content=vendor/ibexa/fastly/fastly/ez_main.vcl
+        ##fastly vcl snippet create --name="Shielding" --version=active --autoclone --type recv --content=vendor/ibexa/fastly/fastly/snippet_re_enable_shielding.vcl
+
+    # Post deploy hook, like deploy but after being deployed and live, for deploy tasks we can do asynchronously
+    # Tip: As this is running while web is running, and it's async, avoid doing anything like cache clearing that affects web requests here.
+    #post_deploy: |
+    #    set -e
+
+# The configuration of scheduled execution.
+# see https://docs.platform.sh/configuration/app/cron.html#cron-jobs
+#
+# Tip: Crons work like on traditional dedicated servers; they share resources with web, so:
+# - For long running heavy operations, consider using background "workers" with event queuing.
+# - To disable certain features during a cronjob, you can usually do it with an env variable or php variable, example: "php -d newrelic.enabled=false bin/console my:import"
+crons:
+    frequent:
+        # NOTE: The minimum interval between cron runs is 5 minutes, even if specified as less.
+        # Except for PE, where crons can be run every minute.
+        # So if you are not on PE, please change the spec to "*/5 * * * *" to avoid a warning on each deploy.
+ spec: "* * * * *" + cmd: "php bin/console ibexa:cron:run" + weekly: + spec: "0 0 * * 0" + cmd: "php bin/console ibexa:check-urls --quiet" + +runtime: + extensions: + - xsl + - imagick + - readline + - redis + - igbinary + - sodium + #- apcu + #- name: 'blackfire' + # configuration: + # server_id: 'xxxx-xxx-xxx-xxx-xxxx' + # server_token: 'xxxx' diff --git a/tests/testdata/.platform/local/README.txt b/tests/testdata/.platform/local/README.txt new file mode 100644 index 0000000..20b0763 --- /dev/null +++ b/tests/testdata/.platform/local/README.txt @@ -0,0 +1,8 @@ +.platform/local +=============== + +This directory is where the Ibexa Cloud CLI stores configuration files, builds, and +other data to help work with your project locally. + +It is not used on remote environments at all - the directory is excluded from +your Git repository (via .git/info/exclude). diff --git a/tests/testdata/.platform/local/project.yaml b/tests/testdata/.platform/local/project.yaml new file mode 100644 index 0000000..87257b2 --- /dev/null +++ b/tests/testdata/.platform/local/project.yaml @@ -0,0 +1,2 @@ +id: 4dz4gstt4twgo +host: ca-1.platform.sh diff --git a/tests/testdata/.platform/routes.yaml b/tests/testdata/.platform/routes.yaml new file mode 100644 index 0000000..1525d92 --- /dev/null +++ b/tests/testdata/.platform/routes.yaml @@ -0,0 +1,11 @@ +"https://{default}/": + type: upstream + upstream: "varnish:http" + cache: + # As this does not support Vary, and purging, we can't use this as Sf Proxy drop in. + # However it is possible to enable this for anonymous traffic when backend sends expiry headers. + enabled: false + +"https://www.{default}/": + type: redirect + to: "https://{default}/" diff --git a/tests/testdata/.platform/services.yaml b/tests/testdata/.platform/services.yaml new file mode 100644 index 0000000..d2c5138 --- /dev/null +++ b/tests/testdata/.platform/services.yaml @@ -0,0 +1,87 @@ +# Default settings in order to set up Ibexa DXP installation on Ibexa Cloud dev instances +# +# Note: Like on own servers, make sure to tune Redis/Solr/Varnish/MySQL memory/disk size for your installation to avoid issues. +# Reach out to platform.sh support to get help on this and insight into your disk/memory usage. + +mysqldb: + type: mariadb:10.4 + disk: 1024 + configuration: + schemas: + - main + # Uncomment if you want to store dfs tables in a separate database: + #- dfs + endpoints: + user: + default_schema: main + privileges: + main: admin + # Uncomment if you want to store dfs tables in a separate database: + #dfs: + # default_schema: dfs + # privileges: + # dfs: admin + +# For use by Symfony Cache (used by Ibexa DXP SPI Persistence Cache) +rediscache: + type: 'redis:5.0' + # For cache you might need to increase the size of your plan if your installation has a sizeable amount of content. + # Check with platform.sh staff if in doubt on this, and if it would make sense to configure larger redis size here. + # size: L + configuration: + # Note: If using RedisTagAwareAdapter it requires one of the 'volatile-*' eviction policies + # https://docs.platform.sh/configuration/services/redis.html#eviction-policy + # https://doc.ezplatform.com/en/latest/getting_started/requirements/ + maxmemory_policy: volatile-lru + +# If you wish to have a separate Redis instance for sessions, uncomment +# this service and the corresponding relationship in .platform.app.yaml. 
#redissession:
+#    type: 'redis:5.0'
+#    configuration:
+#        maxmemory_policy: allkeys-lru
+#
+# Alternatively, if you have a requirement that sessions are persisted across server/redis restarts,
+# have storage space to spare for this, and don't mind a somewhat slower instance type of redis:
+#redissession:
+#    type: redis-persistent:5.0
+#    # Disk size should be bigger than Redis' "maxmemory" setting due to https://redis.io/topics/persistence#log-rewriting.
+#    # The memory given to Redis depends on your plan and "size: ". Adjust "disk: " accordingly.
+#    disk: 512
+#    configuration:
+#        maxmemory_policy: allkeys-lru
+
+# If you wish to use solr, uncomment this service and the corresponding relationship in .platform.app.yaml.
+# Also, you need to generate the config using:
+# vendor/ezsystems/ezplatform-solr-search-engine/bin/generate-solr-config.sh
+# Multi core setup is currently not supported on Platform.sh. Sharding does not work as the cores are
+# unable to reach each other.
+#solrsearch:
+#    type: solr:7.7
+#    disk: 512
+#    configuration:
+#        configsets:
+#            mainconfig: !archive "configsets/solr6"
+#        cores:
+#            collection1:
+#                core_properties: |
+#                    configSet=mainconfig
+#                    schema=schema.xml
+#        endpoints:
+#            collection1:
+#                core: collection1
+
+# If you wish to use elasticsearch, uncomment this service and the corresponding relationship in .platform.app.yaml.
+#elasticsearch:
+#    type: elasticsearch:7.7
+#    disk: 512
+
+# Due to logic in app/config/env/platformsh.php, do not change the service name to something different than 'varnish'
+varnish:
+    type: 'varnish:6.0'
+    relationships:
+        app: "app:http"
+    configuration:
+        vcl: !include
+            type: string
+            path: varnish.vcl
diff --git a/tests/testdata/.platform/varnish.vcl b/tests/testdata/.platform/varnish.vcl
new file mode 100644
index 0000000..920fac8
--- /dev/null
+++ b/tests/testdata/.platform/varnish.vcl
@@ -0,0 +1,340 @@
+// Varnish VCL for Platform.sh with:
+// - Varnish 6.0 or higher (6.0LTS recommended, and is what we mainly test against)
+// - Varnish xkey vmod (via varnish-modules package 0.10.2 or higher, or via Varnish Plus)
+// - eZ Platform 3.x or higher with the ezplatform-http-cache bundle
+//
+
+// Not applicable on Platform.sh:
+//vcl 4.0;
+//import std;
+import xkey;
+
+// Includes not available on Platform.sh, so inlining parameters.vcl:
+acl invalidators {
+    "127.0.0.1";
+    "192.168.0.0"/16;
+}
+
+// ACL for debuggers IP
+acl debuggers {
+    "127.0.0.1";
+    "192.168.0.0"/16;
+}
+
+// Called at the beginning of a request, after the complete request has been received
+sub vcl_recv {
+
+    // Set the backend
+    //set req.backend_hint = ezplatform;
+    // Platform.sh specific:
+    set req.backend_hint = app.backend();
+
+    // Add a Surrogate-Capability header to announce ESI support.
+    set req.http.Surrogate-Capability = "abc=ESI/1.0";
+
+    // Ensure that the Symfony Router generates URLs correctly with Varnish
+    if (req.http.X-Forwarded-Proto == "https" ) {
+        set req.http.X-Forwarded-Port = "443";
+    } else {
+        set req.http.X-Forwarded-Port = "80";
+    }
+
+    // Trigger cache purge if needed
+    call ez_purge;
+
+    // Don't cache requests other than GET and HEAD.
+    if (req.method != "GET" && req.method != "HEAD") {
+        return (pass);
+    }
+
+    // Don't cache Authenticate & Authorization
+    // You may remove this when using REST API with basic auth.
+ if (req.http.Authenticate || req.http.Authorization) { + if (client.ip ~ debuggers) { + set req.http.X-Debug = "Not Cached according to configuration (Authorization)"; + } + return (hash); + } + + // Remove all cookies besides Session ID, as JS tracker cookies and so will make the responses effectively un-cached + if (req.http.cookie) { + set req.http.cookie = ";" + req.http.cookie; + set req.http.cookie = regsuball(req.http.cookie, "; +", ";"); + set req.http.cookie = regsuball(req.http.cookie, ";(eZSESSID[^=]*)=", "; \1="); + set req.http.cookie = regsuball(req.http.cookie, ";[^ ][^;]*", ""); + set req.http.cookie = regsuball(req.http.cookie, "^[; ]+|[; ]+$", ""); + + if (req.http.cookie == "") { + // If there are no more cookies, remove the header to get page cached. + unset req.http.cookie; + } + } + + // Do a standard lookup on assets (these don't vary by user context hash) + // Note that file extension list below is not extensive, so consider completing it to fit your needs. + if (req.url ~ "\.(css|js|gif|jpe?g|bmp|png|tiff?|ico|img|tga|wmf|svg|swf|ico|mp3|mp4|m4a|ogg|mov|avi|wmv|zip|gz|pdf|ttf|eot|wof)$") { + return (hash); + } + + // Sort the query string for cache normalization. + set req.url = std.querysort(req.url); + + // Retrieve client user context hash and add it to the forwarded request. + call ez_user_context_hash; + + // If it passes all these tests, do a lookup anyway. + return (hash); +} + +// Called when a cache lookup is successful. The object being hit may be stale: It can have a zero or negative ttl with only grace or keep time left. +sub vcl_hit { + if (obj.ttl >= 0s) { + // A pure unadulterated hit, deliver it + return (deliver); + } + + if (obj.ttl + obj.grace > 0s) { + // Object is in grace, logic below in this block is what differs from default: + // https://varnish-cache.org/docs/5.2/users-guide/vcl-grace.html#grace-mode + if (!std.healthy(req.backend_hint)) { + // Service is unhealthy, deliver from cache + return (deliver); + } else if (req.http.cookie) { + // Request it by a user with session, refresh the cache to avoid issues for editors and forum users + return (miss); + } + + // By default deliver cache, automatically triggers a background fetch + return (deliver); + } + + // fetch & deliver once we get the result + return (miss); +} + +// Called when the requested object has been retrieved from the backend +sub vcl_backend_response { + + if (bereq.http.accept ~ "application/vnd.fos.user-context-hash" + && beresp.status >= 500 + ) { + return (abandon); + } + + // Check for ESI acknowledgement and remove Surrogate-Control header + if (beresp.http.Surrogate-Control ~ "ESI/1.0") { + unset beresp.http.Surrogate-Control; + set beresp.do_esi = true; + } + + // Make Varnish keep all objects for up to 1 hour beyond their TTL, see vcl_hit for Request logic on this + set beresp.grace = 1h; + + // Compressing the content + if (beresp.http.Content-Type ~ "application/javascript" + || beresp.http.Content-Type ~ "application/json" + || beresp.http.Content-Type ~ "application/vnd.ms-fontobject" + || beresp.http.Content-Type ~ "application/vnd.ibexa.api" + || beresp.http.Content-Type ~ "application/x-font-ttf" + || beresp.http.Content-Type ~ "image/svg+xml" + || beresp.http.Content-Type ~ "text/css" + || beresp.http.Content-Type ~ "text/plain" + ) { + set beresp.do_gzip = true; + } +} + +// Handle purge +// You may add FOSHttpCacheBundle tagging rules +// See http://foshttpcache.readthedocs.org/en/latest/varnish-configuration.html#id4 +sub ez_purge { + // Retrieve 
purge token; this needs to be here due to restart, and the match for the PURGE method is done within
+    call ez_invalidate_token;
+
+    # Adapted with acl from vendor/friendsofsymfony/http-cache/resources/config/varnish/fos_tags_xkey.vcl
+    if (req.method == "PURGEKEYS") {
+        call ez_purge_acl;
+
+        # If neither of the headers is provided we return 400 to simplify detecting wrong configuration
+        if (!req.http.xkey-purge && !req.http.xkey-softpurge) {
+            return (synth(400, "Neither header XKey-Purge or XKey-SoftPurge set"));
+        }
+
+        # Based on the provided header, invalidate (purge) and/or expire (softpurge) the tagged content
+        set req.http.n-gone = 0;
+        set req.http.n-softgone = 0;
+        if (req.http.xkey-purge) {
+            set req.http.n-gone = xkey.purge(req.http.xkey-purge);
+        }
+
+        if (req.http.xkey-softpurge) {
+            set req.http.n-softgone = xkey.softpurge(req.http.xkey-softpurge);
+        }
+
+        return (synth(200, "Purged "+req.http.n-gone+" objects, expired "+req.http.n-softgone+" objects"));
+    }
+
+    # Adapted with acl from vendor/friendsofsymfony/http-cache/resources/config/varnish/fos_purge.vcl
+    if (req.method == "PURGE") {
+        call ez_purge_acl;
+
+        return (purge);
+    }
+}
+
+sub ez_purge_acl {
+    if (req.http.x-invalidate-token) {
+        if (req.http.x-invalidate-token != req.http.x-backend-invalidate-token) {
+            return (synth(405, "Method not allowed"));
+        }
+    } else if (!client.ip ~ invalidators) {
+        return (synth(405, "Method not allowed"));
+    }
+}
+
+// Sub-routine to get the client user context hash, used to vary the page cache based on user rights.
+sub ez_user_context_hash {
+
+    // Prevent tampering attacks on the hash mechanism
+    if (req.restarts == 0
+        && (req.http.accept ~ "application/vnd.fos.user-context-hash"
+            || req.http.x-user-context-hash
+        )
+    ) {
+        return (synth(400, "Bad Request"));
+    }
+
+    if (req.restarts == 0 && (req.method == "GET" || req.method == "HEAD")) {
+        // Backup accept header, if set
+        if (req.http.accept) {
+            set req.http.x-fos-original-accept = req.http.accept;
+        }
+        set req.http.accept = "application/vnd.fos.user-context-hash";
+
+        // Backup original URL
+        set req.http.x-fos-original-url = req.url;
+        set req.url = "/_fos_user_context_hash";
+
+        // Force the lookup, the backend must tell not to cache or vary on all
+        // headers that are used to build the hash.
+        return (hash);
+    }
+
+    // Rebuild the original request which now has the hash.
+    if (req.restarts > 0
+        && req.http.accept == "application/vnd.fos.user-context-hash"
+    ) {
+        set req.url = req.http.x-fos-original-url;
+        unset req.http.x-fos-original-url;
+        if (req.http.x-fos-original-accept) {
+            set req.http.accept = req.http.x-fos-original-accept;
+            unset req.http.x-fos-original-accept;
+        } else {
+            // If the accept header was not set in the original request, remove the header here.
+            unset req.http.accept;
+        }
+
+        // Force the lookup, the backend must tell not to cache or vary on the
+        // user context hash to properly separate cached data.
+
+        return (hash);
+    }
+}
+
+// Sub-routine to get the invalidate token.
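+// As a sketch of the flow implemented below together with vcl_deliver: a
+// PURGE/PURGEKEYS request carrying x-invalidate-token is first rewritten to
+// fetch /_ibexa_http_invalidatetoken from the backend; vcl_deliver then copies
+// the backend's token into x-backend-invalidate-token and restarts the request,
+// so that ez_purge_acl can compare the client token with the backend token.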
+sub ez_invalidate_token { + // Prevent tampering attacks on the token mechanisms + if (req.restarts == 0 + && (req.http.accept ~ "application/vnd.ezplatform.invalidate-token" + || req.http.x-backend-invalidate-token + ) + ) { + return (synth(400, "Bad Request")); + } + + if (req.restarts == 0 && (req.method == "PURGE" || req.method == "PURGEKEYS") && req.http.x-invalidate-token) { + set req.http.accept = "application/vnd.ezplatform.invalidate-token"; + + // Backup original http properties + set req.http.x-fos-token-url = req.url; + set req.http.x-fos-token-method = req.method; + + set req.url = "/_ibexa_http_invalidatetoken"; + + // Force the lookup + return (hash); + } + + // Rebuild the original request which now has the invalidate token. + if (req.restarts > 0 + && req.http.accept == "application/vnd.ezplatform.invalidate-token" + ) { + set req.url = req.http.x-fos-token-url; + set req.method = req.http.x-fos-token-method; + unset req.http.x-fos-token-url; + unset req.http.x-fos-token-method; + unset req.http.accept; + } +} + +sub vcl_deliver { + // On receiving the invalidate token response, copy the invalidate token to the original + // request and restart. + if (req.restarts == 0 + && resp.http.content-type ~ "application/vnd.ezplatform.invalidate-token" + ) { + set req.http.x-backend-invalidate-token = resp.http.x-invalidate-token; + + return (restart); + } + + // On receiving the hash response, copy the hash header to the original + // request and restart. + if (req.restarts == 0 + && resp.http.content-type ~ "application/vnd.fos.user-context-hash" + ) { + set req.http.x-user-context-hash = resp.http.x-user-context-hash; + + return (restart); + } + + // If we get here, this is a real response that gets sent to the client. + + // Remove the vary on user context hash, this is nothing public. Keep all + // other vary headers. + if (resp.http.Vary ~ "X-User-Context-Hash") { + set resp.http.Vary = regsub(resp.http.Vary, "(?i),? *X-User-Context-Hash *", ""); + set resp.http.Vary = regsub(resp.http.Vary, "^, *", ""); + if (resp.http.Vary == "") { + unset resp.http.Vary; + } + + // If we vary by user hash, we'll also adjust the cache control headers going out by default to avoid sending + // large ttl meant for Varnish to shared proxies and such. We assume only session cookie is left after vcl_recv. + if (req.http.cookie) { + // When in session where we vary by user hash we by default avoid caching this in shared proxies & browsers + // For browser cache with it revalidating against varnish, use for instance "private, no-cache" instead + set resp.http.cache-control = "private, no-cache, no-store, must-revalidate"; + } else if (resp.http.cache-control ~ "public") { + // For non logged in users we allow caching on shared proxies (mobile network accelerators, planes, ...) + // But only for a short while, as there is no way to purge them + set resp.http.cache-control = "public, s-maxage=600, stale-while-revalidate=300, stale-if-error=300"; + } + } + + if (client.ip ~ debuggers) { + // Add X-Cache header if debugging is enabled + if (obj.hits > 0) { + set resp.http.X-Cache = "HIT"; + set resp.http.X-Cache-Hits = obj.hits; + set resp.http.X-Cache-TTL = obj.ttl; + } else { + set resp.http.X-Cache = "MISS"; + } + } else { + // Remove tag headers when delivering to non debug client + unset resp.http.xkey; + // Sanity check to prevent ever exposing the hash to a non debug client. 
+ unset resp.http.x-user-context-hash; + } +} diff --git a/web-build/Dockerfile.ibexa-cloud b/web-build/Dockerfile.ibexa-cloud new file mode 100644 index 0000000..793f595 --- /dev/null +++ b/web-build/Dockerfile.ibexa-cloud @@ -0,0 +1,4 @@ +#ddev-generated +ENV PLATFORMSH_CLI_HOME=/usr/local/bin +RUN curl -sfS https://cli.ibexa.cloud/installer | php +RUN cp /root/.ibexa-cli/bin/ibexa_cloud /usr/local/bin \ No newline at end of file
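
A quick smoke test once the add-on is installed (a sketch; it assumes the `IBEXA_CLI_TOKEN`, `IBEXA_PROJECT`, and `IBEXA_ENVIRONMENT` settings described in the README above are in place):

```shell
ddev restart                 # rebuild the web image so the Dockerfile above installs the CLI
ddev ibexa_cloud user:list   # any ibexa_cloud subcommand runs inside the web container
ddev pull ibexa-cloud -y     # pull the upstream database and files
```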