diff --git a/.asf.yaml b/.asf.yaml index 1ec6a2f34d40..ccdb5654498f 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -22,11 +22,11 @@ github: - api-gateway - cloud-native - nginx - - lua - luajit - apigateway - microservices - api + - apis - loadbalancing - reverse-proxy - api-management @@ -36,6 +36,9 @@ github: - devops - kubernetes - docker + - kubernetes-ingress + - kubernetes-ingress-controller + - service-mesh enabled_merge_buttons: squash: true @@ -50,6 +53,10 @@ github: dismiss_stale_reviews: true require_code_owner_reviews: true required_approving_review_count: 2 + release/2.15: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 release/2.14: required_pull_request_reviews: require_code_owner_reviews: true diff --git a/.github/actions/action-semantic-pull-request b/.github/actions/action-semantic-pull-request new file mode 160000 index 000000000000..348e2e692213 --- /dev/null +++ b/.github/actions/action-semantic-pull-request @@ -0,0 +1 @@ +Subproject commit 348e2e6922130ee27d6d6a0a3b284890776d1f80 diff --git a/.github/semantic.yml b/.github/semantic.yml deleted file mode 100644 index 5fe591ed88af..000000000000 --- a/.github/semantic.yml +++ /dev/null @@ -1,15 +0,0 @@ -titleOnly: true -allowRevertCommits: true -types: - - feat - - fix - - docs - - style - - refactor - - perf - - test - - build - - ci - - chore - - revert - - change diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bd199672e7ad..5d34c0330a5c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,13 +25,13 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - linux_openresty_1_19 test_dir: - t/plugin - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc - t/node 
t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc runs-on: ${{ matrix.platform }} @@ -67,6 +67,21 @@ jobs: echo "##[set-output name=version;]$(echo ${GITHUB_REF##*/})" echo "##[set-output name=fullname;]$(echo apache-apisix-${GITHUB_REF##*/}-src.tgz)" + - name: Extract test type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "##[set-output name=type;]$(echo 'plugin')" + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "##[set-output name=type;]$(echo 'first')" + fi + if [[ $test_dir =~ ' t/xrpc' ]]; then + echo "##[set-output name=type;]$(echo 'last')" + fi + - name: Linux launch common services run: | make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml @@ -82,32 +97,29 @@ jobs: rm -rf $(ls -1 --ignore=*.tgz --ignore=ci --ignore=t --ignore=utils --ignore=.github) tar zxvf ${{ steps.branch_env.outputs.fullname }} - - name: Build wasm code - if: matrix.os_name == 'linux_openresty' + - name: Start CI env (FIRST_TEST) + if: steps.test_env.outputs.type == 'first' run: | - export TINYGO_VER=0.20.0 - wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null - sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb - cd t/wasm && find . 
-type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p + # launch deps env + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml - - name: Build xDS library + - name: Start CI env (PLUGIN_TEST) + if: steps.test_env.outputs.type == 'plugin' run: | - cd t/xds-library - go build -o libxds.so -buildmode=c-shared main.go export.go + sh ci/pod/openfunction/build-function-image.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh - - name: Linux Before install - run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install - - - name: Start CI env + - name: Start CI env (LAST_TEST) + if: steps.test_env.outputs.type == 'last' run: | # generating SSL certificates for Kafka sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit - # launch deps env - make ci-env-up - sudo ./ci/linux-ci-init-service.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh - name: Start Dubbo Backend - if: matrix.os_name == 'linux_openresty' + if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin' run: | sudo apt install -y maven cd t/lib/dubbo-backend @@ -115,6 +127,23 @@ jobs: cd dubbo-backend-provider/target java -Djava.net.preferIPv4Stack=true -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log & + - name: Build xDS library + if: steps.test_env.outputs.type == 'last' + run: | + cd t/xds-library + go build -o libxds.so -buildmode=c-shared main.go export.go + + - name: Build wasm code + if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'last' + run: | + export TINYGO_VER=0.20.0 + wget 
https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null + sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb + cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p + + - name: Linux Before install + run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install + - name: Linux Install run: | sudo --preserve-env=OPENRESTY_VERSION \ diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml index 9b2f8fc81b9d..584ffac3bf20 100644 --- a/.github/workflows/centos7-ci.yml +++ b/.github/workflows/centos7-ci.yml @@ -29,7 +29,7 @@ jobs: matrix: test_dir: - t/plugin - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library steps: @@ -45,6 +45,21 @@ jobs: run: | echo "##[set-output name=version;]$(echo ${GITHUB_REF##*/})" + - name: Extract test type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "##[set-output name=type;]$(echo 'plugin')" + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "##[set-output name=type;]$(echo 'first')" + fi + if [[ $test_dir =~ ' t/xds-library' ]]; then + echo "##[set-output name=type;]$(echo 'last')" + fi + - name: Linux launch common services run: | make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml @@ -66,6 +81,7 @@ jobs: rm -rf $(ls -1 --ignore=apisix-build-tools --ignore=t --ignore=utils --ignore=ci --ignore=Makefile --ignore=rockspec) - name: Build xDS library + if: steps.test_env.outputs.type == 'last' run: | cd t/xds-library go build -o libxds.so -buildmode=c-shared main.go export.go @@ -77,12 +93,25 @@ jobs: docker run -itd -v /home/runner/work/apisix/apisix:/apisix --env 
TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash # docker exec centos7Instance bash -c "cp -r /tmp/apisix ./" - - name: Run other docker containers for test + - name: Start CI env (FIRST_TEST) + if: steps.test_env.outputs.type == 'first' + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + + - name: Start CI env (PLUGIN_TEST) + if: steps.test_env.outputs.type == 'plugin' + run: | + sh ci/pod/openfunction/build-function-image.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh + + - name: Start CI env (LAST_TEST) + if: steps.test_env.outputs.type == 'last' run: | # generating SSL certificates for Kafka keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit - make ci-env-up - ./ci/linux-ci-init-service.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh - name: Install dependencies run: | diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml index 58a34d8b187a..fc6b76c26c4b 100644 --- a/.github/workflows/cli-master.yml +++ b/.github/workflows/cli-master.yml @@ -26,7 +26,7 @@ jobs: matrix: job_name: - linux_apisix_master_luarocks - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 15 env: OPENRESTY_VERSION: default diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 342da39b2df8..3582d76183c4 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -25,7 +25,7 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 job_name: - linux_apisix_current_luarocks - 
linux_apisix_current_luarocks_in_customed_nginx diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml index d6b64921b0da..624a03e08dff 100644 --- a/.github/workflows/doc-lint.yml +++ b/.github/workflows/doc-lint.yml @@ -18,7 +18,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: 🚀 Use Node.js - uses: actions/setup-node@v3.3.0 + uses: actions/setup-node@v3.4.1 with: node-version: '12.x' - run: npm install -g markdownlint-cli@0.25.0 diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml index 426ebcc3768c..60dc602c561a 100644 --- a/.github/workflows/fuzzing-ci.yaml +++ b/.github/workflows/fuzzing-ci.yaml @@ -63,21 +63,15 @@ jobs: - name: install boofuzz run: | + # Avoid "ERROR: flask has requirement click>=8.0, but you'll have click 7.0 which is incompatible" + sudo apt remove python3-click pip install -r $PWD/t/fuzzing/requirements.txt - - name: run simpleroute test + - name: run tests run: | python $PWD/t/fuzzing/simpleroute_test.py - - - name: run serverless route test - run: | python $PWD/t/fuzzing/serverless_route_test.py - - - name: run vars route test - run: | python $PWD/t/fuzzing/vars_route_test.py - - - name: run check leak test - run: | python $PWD/t/fuzzing/client_abort.py python $PWD/t/fuzzing/simple_http.py + python $PWD/t/fuzzing/http_upstream.py diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml index 66615cf80cc1..9800e681f55f 100644 --- a/.github/workflows/kubernetes-ci.yml +++ b/.github/workflows/kubernetes-ci.yml @@ -25,10 +25,10 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml index 697a956512cb..55abed61cbc5 100644 --- a/.github/workflows/license-checker.yml +++ b/.github/workflows/license-checker.yml @@ -32,6 
+32,6 @@ jobs: steps: - uses: actions/checkout@v3 - name: Check License Header - uses: apache/skywalking-eyes@v0.3.0 + uses: apache/skywalking-eyes@v0.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2338100168a7..2ba48fdec53f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,7 +32,7 @@ jobs: uses: actions/checkout@v3 - name: Setup Nodejs env - uses: actions/setup-node@v3.3.0 + uses: actions/setup-node@v3.4.1 with: node-version: '12' diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index 0f022df602b6..786c43362898 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -13,7 +13,7 @@ permissions: jobs: performance: if: github.event_name == 'pull_request' && github.event.label.name == 'performance' - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 timeout-minutes: 45 steps: diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml new file mode 100644 index 000000000000..dc1a79010b5c --- /dev/null +++ b/.github/workflows/semantic.yml @@ -0,0 +1,35 @@ +name: "PR Lint" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + main: + name: Validate PR title + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v3 + with: + submodules: recursive + - uses: ./.github/actions/action-semantic-pull-request + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + types: | + feat + fix + docs + style + refactor + perf + test + build + ci + chore + revert + change diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml index e85044671b89..79b69586255d 100644 --- a/.github/workflows/tars-ci.yml +++ b/.github/workflows/tars-ci.yml @@ -25,10 +25,10 @@ jobs: fail-fast: false matrix: platform: - - ubuntu-18.04 + - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_17 + - 
linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 diff --git a/.gitignore b/.gitignore index 33afe64aa1a2..25bc8265ab94 100644 --- a/.gitignore +++ b/.gitignore @@ -77,6 +77,10 @@ t/fuzzing/__pycache__/ boofuzz-results/ *.pyc *.wasm +t/grpc_server_example/grpc_server_example +t/plugin/grpc-web/grpc-web-server +t/plugin/grpc-web/node_modules/ + # release tar package *.tgz release/* diff --git a/.gitmodules b/.gitmodules index beb354b89aa3..3c8ed44e4c56 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "t/toolkit"] path = t/toolkit url = https://github.com/api7/test-toolkit.git +[submodule ".github/actions/action-semantic-pull-request"] + path = .github/actions/action-semantic-pull-request + url = https://github.com/amannn/action-semantic-pull-request.git diff --git a/.licenserc.yaml b/.licenserc.yaml index 5822d7fd25bb..ea5863015302 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -19,7 +19,7 @@ header: spdx-id: Apache-2.0 copyright-owner: Apache Software Foundation - license-location-threshold: 250 + license-location-threshold: 360 paths-ignore: - '.gitignore' diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f1a8ab3464c..45fee3ac52a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,8 +23,11 @@ title: Changelog ## Table of Contents +- [2.15.0](#2150) - [2.14.1](#2141) - [2.14.0](#2140) +- [2.13.3](#2133) +- [2.13.2](#2132) - [2.13.1](#2131) - [2.13.0](#2130) - [2.12.1](#2121) @@ -58,11 +61,60 @@ title: Changelog - [0.7.0](#070) - [0.6.0](#060) +## 2.15.0 + +### Change + +- We now map the grpc error code OUT_OF_RANGE to http code 400 in grpc-transcode plugin: [#7419](https://github.com/apache/apisix/pull/7419) +- Rename health_check_retry configuration in etcd section of `config-default.yaml` to startup_retry: [#7304](https://github.com/apache/apisix/pull/7304) +- Remove `upstream.enable_websocket` which is deprecated since 2020: [#7222](https://github.com/apache/apisix/pull/7222) + +### Core + +- Support running 
plugins conditionally: [#7453](https://github.com/apache/apisix/pull/7453) +- Allow users to specify plugin execution priority: [#7273](https://github.com/apache/apisix/pull/7273) +- Support getting upstream certificate from ssl object: [#7221](https://github.com/apache/apisix/pull/7221) +- Allow customizing error response in the plugin: [#7128](https://github.com/apache/apisix/pull/7128) +- Add metrics to xRPC Redis proxy: [#7183](https://github.com/apache/apisix/pull/7183) +- Introduce deployment role to simplify the deployment of APISIX: + - [#7405](https://github.com/apache/apisix/pull/7405) + - [#7417](https://github.com/apache/apisix/pull/7417) + - [#7392](https://github.com/apache/apisix/pull/7392) + - [#7365](https://github.com/apache/apisix/pull/7365) + - [#7249](https://github.com/apache/apisix/pull/7249) + +### Plugin + +- Add ngx.shared.dict statistic in promethues plugin: [#7412](https://github.com/apache/apisix/pull/7412) +- Allow using unescaped raw URL in proxy-rewrite plugin: [#7401](https://github.com/apache/apisix/pull/7401) +- Add PKCE support to the openid-connect plugin: [#7370](https://github.com/apache/apisix/pull/7370) +- Support custom log format in sls-logger plugin: [#7328](https://github.com/apache/apisix/pull/7328) +- Export some params for kafka-client in kafka-logger plugin: [#7266](https://github.com/apache/apisix/pull/7266) +- Add support for capturing OIDC refresh tokens in openid-connect plugin: [#7220](https://github.com/apache/apisix/pull/7220) +- Add prometheus plugin in stream subsystem: [#7174](https://github.com/apache/apisix/pull/7174) + +### Bugfix + +- clear remain state from the latest try before retrying in Kubernetes discovery: [#7506](https://github.com/apache/apisix/pull/7506) +- the query string was repeated twice when enabling both http_to_https and append_query_string in the redirect plugin: [#7433](https://github.com/apache/apisix/pull/7433) +- don't send empty Authorization header by default in http-logger: 
[#7444](https://github.com/apache/apisix/pull/7444) +- ensure both `group` and `disable` configurations can be used in limit-count: [#7384](https://github.com/apache/apisix/pull/7384) +- adjust the execution priority of request-id so the tracing plugins can use the request id: [#7281](https://github.com/apache/apisix/pull/7281) +- correct the transcode of repeated Message in grpc-transcode: [#7231](https://github.com/apache/apisix/pull/7231) +- var missing in proxy-cache cache key should be ignored: [#7168](https://github.com/apache/apisix/pull/7168) +- reduce memory usage when abnormal weights are given in chash: [#7103](https://github.com/apache/apisix/pull/7103) +- cache should be bypassed when the method mismatch in proxy-cache: [#7111](https://github.com/apache/apisix/pull/7111) +- Upstream keepalive should consider TLS param: +    - [#7054](https://github.com/apache/apisix/pull/7054) +    - [#7466](https://github.com/apache/apisix/pull/7466) +- The redirect plugin sets a correct port during redirecting HTTP to HTTPS: +    - [#7065](https://github.com/apache/apisix/pull/7065) + ## 2.14.1 -**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.14` branch.** +### Bugfix -[https://github.com/apache/apisix/blob/release/2.14/CHANGELOG.md#2141](https://github.com/apache/apisix/blob/release/2.14/CHANGELOG.md#2141) +- The "unix:" in the `real_ip_from` configuration should not break the batch-requests plugin: [#7106](https://github.com/apache/apisix/pull/7106) ## 2.14.0 @@ -120,6 +172,18 @@ title: Changelog - [#6686](https://github.com/apache/apisix/pull/6686) - Admin API rejects unknown stream plugin: [#6813](https://github.com/apache/apisix/pull/6813) +## 2.13.3 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + +[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133) + +## 2.13.2 + +**This is an LTS 
maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + +[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132) + ## 2.13.1 **This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** diff --git a/LICENSE b/LICENSE index 0474b542fb65..5cadce448d62 100644 --- a/LICENSE +++ b/LICENSE @@ -216,3 +216,4 @@ The following components are provided under the Apache License. See project link The text of each license is the standard Apache 2.0 license. ewma.lua file from kubernetes/ingress-nginx: https://github.com/kubernetes/ingress-nginx Apache 2.0 + hello.go file from OpenFunction/samples: https://github.com/OpenFunction/samples Apache 2.0 diff --git a/MAINTAIN.md b/MAINTAIN.md index cc91824d0045..795aa8c665ef 100644 --- a/MAINTAIN.md +++ b/MAINTAIN.md @@ -26,8 +26,7 @@ 2. Create a [pull request](https://github.com/apache/apisix/commit/21d7673c6e8ff995677456cdebc8ded5afbb3d0a) (contains the backport commits, and the change in step 1) to minor branch > This should include those PRs that contain the `need backport` tag since the last patch release. Also, the title of these PRs need to be added to the changelog of the minor branch. 3. Merge it into minor branch -4. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created -via `VERSION=x.y.z make release-src` +4. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` 5. Send the [vote email](https://lists.apache.org/thread/vq4qtwqro5zowpdqhx51oznbjy87w9d0) to dev@apisix.apache.org > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` 6. 
When the vote is passed, send the [vote result email](https://lists.apache.org/thread/k2frnvj4zj9oynsbr7h7nd6n6m3q5p89) to dev@apisix.apache.org @@ -38,15 +37,15 @@ via `VERSION=x.y.z make release-src` 11. Update APISIX rpm package > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the package to yum repo -12. First, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2` +12. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. + - If released an LTS version and the version number less than the current largest(e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`, after PR reviewed, don't need to merged PR, just close the PR and push the branch to APISIX docker repository. 13. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) if the version number is the largest 14. Send the [ANNOUNCE email](https://lists.apache.org/thread.html/ree7b06e6eac854fd42ba4f302079661a172f514a92aca2ef2f1aa7bb%40%3Cdev.apisix.apache.org%3E) to dev@apisix.apache.org & announce@apache.org ### Release minor version 1. 
Create a minor branch, and create [pull request](https://github.com/apache/apisix/commit/bc6ddf51f15e41fffea6c5bd7d01da9838142b66) to master branch from it -2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created -via `VERSION=x.y.z make release-src` +2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` 3. Send the [vote email](https://lists.apache.org/thread/q8zq276o20r5r9qjkg074nfzb77xwry9) to dev@apisix.apache.org > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` 4. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/p1m9s116rojlhb91g38cj8646393qkz7) to dev@apisix.apache.org @@ -57,6 +56,7 @@ via `VERSION=x.y.z make release-src` 9. Update [APISIX's website](https://github.com/apache/apisix-website/commit/7bf0ab5a1bbd795e6571c4bb89a6e646115e7ca3) 10. Update APISIX rpm package. > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the rpm package to yum repo -11. First, Update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2` +11. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after PR merged, then create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. 
+ - If released an LTS version and the version number less than the current largest(e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`, after PR reviewed, don't need to merged PR, just close the PR and push the branch to APISIX docker repository. 12. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) 13. Send the [ANNOUNCE email](https://lists.apache.org/thread/4s4msqwl1tq13p9dnv3hx7skbgpkozw1) to dev@apisix.apache.org & announce@apache.org diff --git a/Makefile b/Makefile index 989fe3714f8a..49468dc57e41 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,6 @@ SHELL := /bin/bash -o pipefail # Project basic setting VERSION ?= master project_name ?= apache-apisix -project_compose_ci ?= ci/pod/docker-compose.yml project_release_name ?= $(project_name)-$(VERSION)-src @@ -244,7 +243,7 @@ clean: .PHONY: reload reload: runtime @$(call func_echo_status, "$@ -> [ Start ]") - $(ENV_NGINX) -s reload + $(ENV_APISIX) reload @$(call func_echo_success_status, "$@ -> [ Done ]") @@ -260,19 +259,19 @@ install: runtime $(ENV_INSTALL) conf/debug.yaml /usr/local/apisix/conf/debug.yaml $(ENV_INSTALL) conf/cert/* /usr/local/apisix/conf/cert/ - # Lua directories listed in alphabetical order + # directories listed in alphabetical order $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix $(ENV_INSTALL) apisix/*.lua $(ENV_INST_LUADIR)/apisix/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model - $(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/admin $(ENV_INSTALL) apisix/admin/*.lua $(ENV_INST_LUADIR)/apisix/admin/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/balancer $(ENV_INSTALL) 
apisix/balancer/*.lua $(ENV_INST_LUADIR)/apisix/balancer/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli + $(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/control $(ENV_INSTALL) apisix/control/*.lua $(ENV_INST_LUADIR)/apisix/control/ @@ -282,34 +281,34 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core/dns $(ENV_INSTALL) apisix/core/dns/*.lua $(ENV_INST_LUADIR)/apisix/core/dns - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli - $(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery $(ENV_INSTALL) apisix/discovery/*.lua $(ENV_INST_LUADIR)/apisix/discovery/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery/{consul_kv,dns,eureka,nacos,kubernetes,tars} $(ENV_INSTALL) apisix/discovery/consul_kv/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul_kv $(ENV_INSTALL) apisix/discovery/dns/*.lua $(ENV_INST_LUADIR)/apisix/discovery/dns $(ENV_INSTALL) apisix/discovery/eureka/*.lua $(ENV_INST_LUADIR)/apisix/discovery/eureka - $(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos $(ENV_INSTALL) apisix/discovery/kubernetes/*.lua $(ENV_INST_LUADIR)/apisix/discovery/kubernetes + $(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos $(ENV_INSTALL) apisix/discovery/tars/*.lua $(ENV_INST_LUADIR)/apisix/discovery/tars - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub - $(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http $(ENV_INSTALL) apisix/http/*.lua $(ENV_INST_LUADIR)/apisix/http/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http/router $(ENV_INSTALL) apisix/http/router/*.lua $(ENV_INST_LUADIR)/apisix/http/router/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model + $(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/ + $(ENV_INSTALL) -d 
$(ENV_INST_LUADIR)/apisix/plugins $(ENV_INSTALL) apisix/plugins/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin $(ENV_INSTALL) apisix/plugins/ext-plugin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging + $(ENV_INSTALL) apisix/plugins/google-cloud-logging/*.lua $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode $(ENV_INSTALL) apisix/plugins/grpc-transcode/*.lua $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode/ @@ -322,9 +321,6 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-count $(ENV_INSTALL) apisix/plugins/limit-count/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-count/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging - $(ENV_INSTALL) apisix/plugins/google-cloud-logging/*.lua $(ENV_INST_LUADIR)/apisix/plugins/google-cloud-logging/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/opa $(ENV_INSTALL) apisix/plugins/opa/*.lua $(ENV_INST_LUADIR)/apisix/plugins/opa/ @@ -343,6 +339,12 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog $(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls + $(ENV_INSTALL) apisix/plugins/tencent-cloud-cls/*.lua $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub + $(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/zipkin $(ENV_INSTALL) apisix/plugins/zipkin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/zipkin/ diff --git a/README.md b/README.md index e28bef917c13..881c92e67dc8 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ The technical architecture of Apache APISIX: - Mailing List: Mail to 
dev-subscribe@apisix.apache.org, follow the reply to subscribe to the mailing list. - QQ Group - 552030619, 781365357 -- Slack Workspace - [invitation link](https://join.slack.com/t/the-asf/shared_invite/zt-vlfbf7ch-HkbNHiU_uDlcH_RvaHv9gQ) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix"). +- Slack Workspace - [invitation link](https://join.slack.com/t/the-asf/shared_invite/zt-1egxjz7lw-lWl142XNDopj4FlqNMUM5g) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix"). - ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - follow and interact with us using hashtag `#ApacheAPISIX` - [Documentation](https://apisix.apache.org/docs/) - [Discussions](https://github.com/apache/apisix/discussions) @@ -75,7 +75,7 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - **Full Dynamic** - [Hot Updates And Hot Plugins](docs/en/latest/terminology/plugin.md): Continuously updates its configurations and plugins without restarts! - - [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Support rewrite the `host`, `uri`, `schema`, `enable_websocket`, `headers` of the request before send to upstream. + - [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Support rewrite the `host`, `uri`, `schema`, `method`, `headers` of the request before send to upstream. - [Response Rewrite](docs/en/latest/plugins/response-rewrite.md): Set customized response status code, body and header to the client. - Dynamic Load Balancing: Round-robin load balancing with weight. - Hash-based Load Balancing: Load balance with consistent hashing sessions. 
@@ -135,6 +135,8 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - [Fault Injection](docs/en/latest/plugins/fault-injection.md) - [REST Admin API](docs/en/latest/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also, note that the Admin API uses key auth to verify the identity of the caller. **The `admin_key` field in `conf/config.yaml` needs to be modified before deployment to ensure security**. - External Loggers: Export access logs to external log management tools. ([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md)) + - [ClickHouse](docs/en/latest/plugins/clickhouse-logger.md): push logs to ClickHouse. + - [Elasticsearch](docs/en/latest/plugins/elasticsearch-logger.md): push logs to Elasticsearch. - [Datadog](docs/en/latest/plugins/datadog.md): push custom metrics to the DogStatsD server, comes bundled with [Datadog agent](https://docs.datadoghq.com/agent/), over the UDP protocol. DogStatsD basically is an implementation of StatsD protocol which collects the custom metrics for Apache APISIX agent, aggregates it into a single data point and sends it to the configured Datadog server. 
- [Helm charts](https://github.com/apache/apisix-helm-chart) - [HashiCorp Vault](https://www.vaultproject.io/): Support secret management solution for accessing secrets from Vault secure storage backed in a low trust environment. Currently, RS256 keys (public-private key pairs) or secret keys can be linked from vault in [jwt-auth](docs/en/latest/plugins/jwt-auth.md#enable-jwt-auth-with-vault-compatibility) authentication plugin. @@ -187,7 +189,7 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on [Benchmark script](benchmark/run.sh) has been open sourced, welcome to try and contribute. -[The APISIX APISIX Gateway also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3) +[APISIX also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3) ## Contributor Over Time @@ -199,7 +201,7 @@ Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of on - [European eFactory Platform: API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform) - [Copernicus Reference System Software](https://github.com/COPRS/infrastructure/wiki/Networking-trade-off) -- [More Stories](https://apisix.apache.org/blog/tags/user-case) +- [More Stories](https://apisix.apache.org/blog/tags/case-studies/) ## Who Uses APISIX API Gateway? diff --git a/Vision-and-Milestones.md b/Vision-and-Milestones.md new file mode 100644 index 000000000000..333d991f9399 --- /dev/null +++ b/Vision-and-Milestones.md @@ -0,0 +1,40 @@ + + +### Vision + +Apache APISIX is an open source API gateway designed to help developers connect any APIs securely and efficiently in any environment. 
+
+Managing thousands or tens of thousands of APIs and microservices in a multi-cloud and hybrid cloud environment is not an easy task.
+There will be many challenges, such as authentication, observability, security, etc.
+
+Apache APISIX, a community-driven project, hopes to help everyone better manage and use APIs through the power of developers.
+Every developer's contribution will be used by thousands of companies and served by billions of users.
+
+### Milestones
+
+Apache APISIX has relatively complete features for north-south traffic,
+and will be iterated around the following directions in the next 6 months (if you have any ideas, feel free to create an issue to discuss):
+
+- More complete support for Gateway API on APISIX ingress controller
+- Add support for service mesh
+- User-friendly documentation
+- More plugins for public cloud and SaaS services
+- Java/Go plugins and Wasm production-ready
+- Add dynamic debugging tools for Apache APISIX diff --git a/apisix/admin/consumers.lua b/apisix/admin/consumers.lua index 46b23de09bdb..77416dbb07df 100644 --- a/apisix/admin/consumers.lua +++ b/apisix/admin/consumers.lua @@ -22,6 +22,7 @@ local pairs = pairs local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/global_rules.lua b/apisix/admin/global_rules.lua index c4dd4ca93380..88d3d2af6009 100644 --- a/apisix/admin/global_rules.lua +++ b/apisix/admin/global_rules.lua @@ -23,6 +23,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua index 318348ecd4ab..7aaf8f99f4e6 100644 --- a/apisix/admin/init.lua +++ b/apisix/admin/init.lua @@ -18,6 +18,7 @@ local require = require local core = require("apisix.core") local route = require("apisix.utils.router") local plugin = require("apisix.plugin") +local v3_adapter = require("apisix.admin.v3_adapter") local ngx = ngx local get_method = ngx.req.get_method local ngx_time = ngx.time @@ -46,9 +47,9 @@ local resources = 
{ upstreams = require("apisix.admin.upstreams"), consumers = require("apisix.admin.consumers"), schema = require("apisix.admin.schema"), - ssl = require("apisix.admin.ssl"), + ssls = require("apisix.admin.ssl"), plugins = require("apisix.admin.plugins"), - proto = require("apisix.admin.proto"), + protos = require("apisix.admin.proto"), global_rules = require("apisix.admin.global_rules"), stream_routes = require("apisix.admin.stream_routes"), plugin_metadata = require("apisix.admin.plugin_metadata"), @@ -186,7 +187,17 @@ local function run() local code, data = resource[method](seg_id, req_body, seg_sub_path, uri_args) if code then + if v3_adapter.enable_v3() then + core.response.set_header("X-API-VERSION", "v3") + else + core.response.set_header("X-API-VERSION", "v2") + end + if resource.need_v3_filter then + data = v3_adapter.filter(data) + end + data = strip_etcd_resp(data) + core.response.exit(code, data) end end diff --git a/apisix/admin/plugin_config.lua b/apisix/admin/plugin_config.lua index bcf199fcd27c..708de0164636 100644 --- a/apisix/admin/plugin_config.lua +++ b/apisix/admin/plugin_config.lua @@ -24,6 +24,7 @@ local ipairs = ipairs local _M = { + need_v3_filter = true, } diff --git a/apisix/admin/plugin_metadata.lua b/apisix/admin/plugin_metadata.lua index bde9af05abfb..23859c775e24 100644 --- a/apisix/admin/plugin_metadata.lua +++ b/apisix/admin/plugin_metadata.lua @@ -21,6 +21,7 @@ local utils = require("apisix.admin.utils") local injected_mark = "injected metadata_schema" local _M = { + need_v3_filter = true, } diff --git a/apisix/admin/proto.lua b/apisix/admin/proto.lua index 132db68a1406..d00c216535e0 100644 --- a/apisix/admin/proto.lua +++ b/apisix/admin/proto.lua @@ -26,6 +26,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } @@ -69,7 +70,7 @@ function _M.put(id, conf) return 400, err end - local key = "/proto/" .. id + local key = "/protos/" .. 
id local ok, err = utils.inject_conf_with_prev_conf("proto", key, conf) if not ok then @@ -87,7 +88,7 @@ end function _M.get(id) - local key = "/proto" + local key = "/protos" if id then key = key .. "/" .. id end @@ -109,7 +110,7 @@ function _M.post(id, conf) return 400, err end - local key = "/proto" + local key = "/protos" utils.inject_timestamp(conf) local res, err = core.etcd.push(key, conf) if not res then @@ -181,7 +182,7 @@ function _M.delete(id) end core.log.info("proto delete service ref check pass: ", id) - local key = "/proto/" .. id + local key = "/protos/" .. id -- core.log.info("key: ", key) local res, err = core.etcd.delete(key) if not res then diff --git a/apisix/admin/routes.lua b/apisix/admin/routes.lua index 877f6cf5e2c1..4cd36b385146 100644 --- a/apisix/admin/routes.lua +++ b/apisix/admin/routes.lua @@ -26,6 +26,7 @@ local loadstring = loadstring local _M = { version = 0.2, + need_v3_filter = true, } diff --git a/apisix/admin/services.lua b/apisix/admin/services.lua index 59c53eec3c6f..505ab2ccd045 100644 --- a/apisix/admin/services.lua +++ b/apisix/admin/services.lua @@ -27,6 +27,7 @@ local loadstring = loadstring local _M = { version = 0.3, + need_v3_filter = true, } diff --git a/apisix/admin/ssl.lua b/apisix/admin/ssl.lua index 9a73107c9f10..35f80a7ffe87 100644 --- a/apisix/admin/ssl.lua +++ b/apisix/admin/ssl.lua @@ -22,6 +22,7 @@ local type = type local _M = { version = 0.1, + need_v3_filter = true, } @@ -72,7 +73,7 @@ function _M.put(id, conf) end end - local key = "/ssl/" .. id + local key = "/ssls/" .. id local ok, err = utils.inject_conf_with_prev_conf("ssl", key, conf) if not ok then @@ -90,7 +91,7 @@ end function _M.get(id) - local key = "/ssl" + local key = "/ssls" if id then key = key .. "/" .. 
id end @@ -126,7 +127,7 @@ function _M.post(id, conf) end end - local key = "/ssl" + local key = "/ssls" utils.inject_timestamp(conf) local res, err = core.etcd.push(key, conf) if not res then @@ -143,7 +144,7 @@ function _M.delete(id) return 400, {error_msg = "missing ssl id"} end - local key = "/ssl/" .. id + local key = "/ssls/" .. id -- core.log.info("key: ", key) local res, err = core.etcd.delete(key) if not res then @@ -168,7 +169,7 @@ function _M.patch(id, conf, sub_path) return 400, {error_msg = "invalid configuration"} end - local key = "/ssl" + local key = "/ssls" if id then key = key .. "/" .. id end diff --git a/apisix/admin/stream_routes.lua b/apisix/admin/stream_routes.lua index 6770830acf1f..51b944ebaea3 100644 --- a/apisix/admin/stream_routes.lua +++ b/apisix/admin/stream_routes.lua @@ -22,6 +22,7 @@ local tostring = tostring local _M = { version = 0.1, + need_v3_filter = true, } diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua index 5aec652691f3..45a7199f4373 100644 --- a/apisix/admin/upstreams.lua +++ b/apisix/admin/upstreams.lua @@ -26,6 +26,7 @@ local type = type local _M = { version = 0.2, + need_v3_filter = true, } diff --git a/apisix/admin/utils.lua b/apisix/admin/utils.lua index 3ff695a473b6..db73dda6751f 100644 --- a/apisix/admin/utils.lua +++ b/apisix/admin/utils.lua @@ -24,8 +24,8 @@ local _M = {} local function inject_timestamp(conf, prev_conf, patch_conf) if not conf.create_time then - if prev_conf and prev_conf.node.value.create_time then - conf.create_time = prev_conf.node.value.create_time + if prev_conf and (prev_conf.node or prev_conf.list).value.create_time then + conf.create_time = (prev_conf.node or prev_conf.list).value.create_time else -- As we don't know existent data's create_time, we have to pretend -- they are created now. 
diff --git a/apisix/admin/v3_adapter.lua b/apisix/admin/v3_adapter.lua new file mode 100644 index 000000000000..2bb03cbeb208 --- /dev/null +++ b/apisix/admin/v3_adapter.lua @@ -0,0 +1,214 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local fetch_local_conf = require("apisix.core.config_local").local_conf +local try_read_attr = require("apisix.core.table").try_read_attr +local deepcopy = require("apisix.core.table").deepcopy +local log = require("apisix.core.log") +local request = require("apisix.core.request") +local response = require("apisix.core.response") +local table = require("apisix.core.table") +local tonumber = tonumber +local re_find = ngx.re.find +local pairs = pairs + +local _M = {} + + +local admin_api_version +local function enable_v3() + if admin_api_version then + if admin_api_version == "v3" then + return true + end + + if admin_api_version == "default" then + return false + end + end + + local local_conf, err = fetch_local_conf() + if not local_conf then + admin_api_version = "default" + log.error("failed to fetch local conf: ", err) + return false + end + + local api_ver = try_read_attr(local_conf, "apisix", "admin_api_version") + if api_ver ~= "v3" then + admin_api_version = "default" + 
return false + end + + admin_api_version = api_ver + return true +end +_M.enable_v3 = enable_v3 + + +function _M.to_v3(body, action) + if not enable_v3() then + body.action = action + end +end + + +function _M.to_v3_list(body) + if not enable_v3() then + return + end + + if body.node.dir then + body.list = body.node.nodes + body.node = nil + end +end + + +local function sort(l, r) + return l.createdIndex < r.createdIndex +end + + +local function pagination(body, args) + args.page = tonumber(args.page) + args.page_size = tonumber(args.page_size) + if not args.page or not args.page_size then + return + end + + if args.page_size < 10 or args.page_size > 500 then + return response.exit(400, "page_size must be between 10 and 500") + end + + if not args.page or args.page < 1 then + -- default page is 1 + args.page = 1 + end + + local list = body.list + + -- sort nodes by there createdIndex + table.sort(list, sort) + + local to = args.page * args.page_size + local from = to - args.page_size + 1 + + local res = table.new(20, 0) + + for i = from, to do + if list[i] then + res[i - from + 1] = list[i] + end + end + + body.list = res +end + + +local function filter(body, args) + if not args.name and not args.label and not args.uri then + return + end + + for i = #body.list, 1, -1 do + local name_matched = true + local label_matched = true + local uri_matched = true + if args.name then + name_matched = false + local matched = re_find(body.list[i].value.name, args.name, "jo") + if matched then + name_matched = true + end + end + + if args.label then + label_matched = false + if body.list[i].value.labels then + for k, _ in pairs(body.list[i].value.labels) do + if k == args.label then + label_matched = true + break + end + end + end + end + + if args.uri then + uri_matched = false + if body.list[i].value.uri then + local matched = re_find(body.list[i].value.uri, args.uri, "jo") + if matched then + uri_matched = true + end + end + + if body.list[i].value.uris then + for _, uri in 
pairs(body.list[i].value.uris) do + if re_find(uri, args.uri, "jo") then + uri_matched = true + break + end + end + end + end + + if not name_matched or not label_matched or not uri_matched then + table.remove(body.list, i) + end + end +end + + +function _M.filter(body) + if not enable_v3() then + return + end + + local args = request.get_uri_args() + local processed_body = deepcopy(body) + + if processed_body.deleted then + processed_body.node = nil + end + + -- strip node wrapping for single query, create, and update scenarios. + if processed_body.node then + processed_body = processed_body.node + end + + -- filter and paging logic for list query only + if processed_body.list then + filter(processed_body, args) + + -- calculate the total amount of filtered data + processed_body.total = processed_body.list and #processed_body.list or 0 + + pagination(processed_body, args) + + -- remove the count field returned by etcd + -- we don't need a field that reflects the length of the currently returned data, + -- it doesn't make sense + processed_body.count = nil + end + + return processed_body +end + + +return _M diff --git a/apisix/balancer.lua b/apisix/balancer.lua index 4dd387400533..462d04f07ad2 100644 --- a/apisix/balancer.lua +++ b/apisix/balancer.lua @@ -26,6 +26,7 @@ local set_more_tries = balancer.set_more_tries local get_last_failure = balancer.get_last_failure local set_timeouts = balancer.set_timeouts local ngx_now = ngx.now +local str_byte = string.byte local module_name = "balancer" @@ -195,6 +196,12 @@ local function pick_server(route, ctx) core.log.info("ctx: ", core.json.delay_encode(ctx, true)) local up_conf = ctx.upstream_conf + for _, node in ipairs(up_conf.nodes) do + if core.utils.parse_ipv6(node.host) and str_byte(node.host, 1) ~= str_byte("[") then + node.host = '[' .. node.host .. 
']' + end + end + local nodes_count = #up_conf.nodes if nodes_count == 1 then local node = up_conf.nodes[1] @@ -302,6 +309,7 @@ do local size = keepalive_pool.size local requests = keepalive_pool.requests + core.table.clear(pool_opt) pool_opt.pool_size = size local scheme = up_conf.scheme @@ -358,7 +366,7 @@ function _M.run(route, ctx, plugin_funcs) local header_changed local pass_host = ctx.pass_host - if pass_host == "node" and balancer.recreate_request then + if pass_host == "node" then local host = server.upstream_host if host ~= ctx.var.upstream_host then -- retried node has a different host @@ -369,7 +377,7 @@ function _M.run(route, ctx, plugin_funcs) local _, run = plugin_funcs("before_proxy") -- always recreate request as the request may be changed by plugins - if (run or header_changed) and balancer.recreate_request then + if run or header_changed then balancer.recreate_request() end end diff --git a/apisix/cli/apisix.lua b/apisix/cli/apisix.lua index d284e20848dd..079691f51a04 100755 --- a/apisix/cli/apisix.lua +++ b/apisix/cli/apisix.lua @@ -18,14 +18,20 @@ local pkg_cpath_org = package.cpath local pkg_path_org = package.path +local _, find_pos_end = string.find(pkg_path_org, ";", -1, true) +if not find_pos_end then + pkg_path_org = pkg_path_org .. ";" +end + local apisix_home = "/usr/local/apisix" local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;" .. apisix_home .. "/deps/lib/lua/5.1/?.so;" -local pkg_path = apisix_home .. "/deps/share/lua/5.1/?.lua;" +local pkg_path_deps = apisix_home .. "/deps/share/lua/5.1/?.lua;" +local pkg_path_env = apisix_home .. "/?.lua;" -- modify the load path to load our dependencies package.cpath = pkg_cpath .. pkg_cpath_org -package.path = pkg_path .. pkg_path_org +package.path = pkg_path_deps .. pkg_path_org .. 
pkg_path_env -- pass path to construct the final result local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org) diff --git a/apisix/cli/env.lua b/apisix/cli/env.lua index 3c78ab3c11d2..f0e1a36e7e88 100644 --- a/apisix/cli/env.lua +++ b/apisix/cli/env.lua @@ -82,7 +82,7 @@ return function (apisix_home, pkg_cpath_org, pkg_path_org) -- pre-transform openresty path res, err = util.execute_cmd("command -v openresty") if not res then - error("failed to exec ulimit cmd \'command -v openresty\', err: " .. err) + error("failed to exec cmd \'command -v openresty\', err: " .. err) end local openresty_path_abs = util.trim(res) diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua index 9edfbcd51876..adf5c26cd8d5 100644 --- a/apisix/cli/etcd.lua +++ b/apisix/cli/etcd.lua @@ -197,8 +197,9 @@ function _M.init(env, args) local res, err local retry_time = 0 - local health_check_retry = tonumber(yaml_conf.etcd.health_check_retry) or 2 - while retry_time < health_check_retry do + local etcd = yaml_conf.etcd + local max_retry = tonumber(etcd.startup_retry) or 2 + while retry_time < max_retry do res, err = request(version_url, yaml_conf) -- In case of failure, request returns nil followed by an error message. 
-- Else the first return value is the response body diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua index 5bd64a682e96..dae9504dc151 100644 --- a/apisix/cli/file.lua +++ b/apisix/cli/file.lua @@ -133,10 +133,6 @@ local function path_is_multi_type(path, type_val) return true end - if path == "apisix->ssl->listen_port" and type_val == "number" then - return true - end - return false end @@ -237,14 +233,6 @@ function _M.read_yaml_conf(apisix_home) end end - if default_conf.deployment - and default_conf.deployment.role == "traditional" - and default_conf.deployment.etcd - then - default_conf.etcd = default_conf.deployment.etcd - default_conf.etcd.unix_socket_proxy = "unix:./conf/config_listen.sock" - end - if default_conf.apisix.config_center == "yaml" then local apisix_conf_path = profile:yaml_path("apisix") local apisix_conf_yaml, _ = util.read_file(apisix_conf_path) @@ -259,6 +247,35 @@ function _M.read_yaml_conf(apisix_home) end end + if default_conf.deployment then + if default_conf.deployment.role == "traditional" then + default_conf.etcd = default_conf.deployment.etcd + + elseif default_conf.deployment.role == "control_plane" then + default_conf.etcd = default_conf.deployment.etcd + default_conf.apisix.enable_admin = true + + elseif default_conf.deployment.role == "data_plane" then + if default_conf.deployment.role_data_plane.config_provider == "yaml" then + default_conf.apisix.config_center = "yaml" + else + default_conf.etcd = default_conf.deployment.role_data_plane.control_plane + end + default_conf.apisix.enable_admin = false + end + + if default_conf.etcd and default_conf.deployment.certs then + -- copy certs configuration to keep backward compatible + local certs = default_conf.deployment.certs + local etcd = default_conf.etcd + if not etcd.tls then + etcd.tls = {} + end + etcd.tls.cert = certs.cert + etcd.tls.key = certs.cert_key + end + end + return default_conf end diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua index 
161c530b8d74..5d814a091e4e 100644 --- a/apisix/cli/ngx_tpl.lua +++ b/apisix/cli/ngx_tpl.lua @@ -64,8 +64,14 @@ lua { {% end %} } -{% if (enabled_stream_plugins["prometheus"] or conf_server) and not enable_http then %} +{% if enabled_stream_plugins["prometheus"] and not enable_http then %} http { + lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] + .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; + lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=] + .. [=[$prefix/deps/lib/lua/5.1/?.so;;]=] + .. [=[{*lua_cpath*};"; + {% if enabled_stream_plugins["prometheus"] then %} init_worker_by_lua_block { require("apisix.plugins.prometheus.exporter").http_init(true) @@ -296,33 +302,6 @@ http { lua_shared_dict {*cache_key*} {*cache_size*}; {% end %} {% end %} - {% if http.lua_shared_dicts then %} - {% for cache_key, cache_size in pairs(http.lua_shared_dicts) do %} - lua_shared_dict {*cache_key*} {*cache_size*}; - {% end %} - {% end %} - - {% if enabled_plugins["proxy-cache"] then %} - # for proxy cache - {% for _, cache in ipairs(proxy_cache.zones) do %} - {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} - proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off; - {% else %} - lua_shared_dict {* cache.name *} {* cache.memory_size *}; - {% end %} - {% end %} - {% end %} - - {% if enabled_plugins["proxy-cache"] then %} - # for proxy cache - map $upstream_cache_zone $upstream_cache_zone_info { - {% for _, cache in ipairs(proxy_cache.zones) do %} - {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} - {* cache.name *} {* cache.disk_path *},{* cache.cache_levels *}; - {% end %} - {% end %} - } - {% end %} {% if enabled_plugins["error-log-logger"] then %} lua_capture_error_log 10m; @@ -369,10 +348,7 @@ 
http { # error_page error_page 500 @50x.html; - {% if real_ip_header then %} - real_ip_header {* real_ip_header *}; - {% print("\nDeprecated: apisix.real_ip_header has been moved to nginx_config.http.real_ip_header. apisix.real_ip_header will be removed in the future version. Please use nginx_config.http.real_ip_header first.\n\n") %} - {% elseif http.real_ip_header then %} + {% if http.real_ip_header then %} real_ip_header {* http.real_ip_header *}; {% end %} @@ -380,12 +356,7 @@ http { real_ip_recursive {* http.real_ip_recursive *}; {% end %} - {% if real_ip_from then %} - {% print("\nDeprecated: apisix.real_ip_from has been moved to nginx_config.http.real_ip_from. apisix.real_ip_from will be removed in the future version. Please use nginx_config.http.real_ip_from first.\n\n") %} - {% for _, real_ip in ipairs(real_ip_from) do %} - set_real_ip_from {*real_ip*}; - {% end %} - {% elseif http.real_ip_from then %} + {% if http.real_ip_from then %} {% for _, real_ip in ipairs(http.real_ip_from) do %} set_real_ip_from {*real_ip*}; {% end %} @@ -461,17 +432,20 @@ http { dns_resolver = dns_resolver, } apisix.http_init(args) + + -- set apisix_lua_home into constans module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "{*apisix_lua_home*}" } init_worker_by_lua_block { apisix.http_init_worker() } - {% if not use_openresty_1_17 then %} exit_worker_by_lua_block { apisix.http_exit_worker() } - {% end %} {% if enable_control then %} server { @@ -515,7 +489,7 @@ http { } {% end %} - {% if enable_admin and admin_server_addr then %} + {% if enable_admin then %} server { {%if https_admin then%} listen {* admin_server_addr *} ssl; @@ -580,6 +554,27 @@ http { {* conf_server *} {% end %} + {% if deployment_role ~= "control_plane" then %} + + {% if enabled_plugins["proxy-cache"] then %} + # for proxy cache + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and 
cache.cache_levels and cache.disk_size then %} + proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off; + {% else %} + lua_shared_dict {* cache.name *} {* cache.memory_size *}; + {% end %} + {% end %} + + map $upstream_cache_zone $upstream_cache_zone_info { + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} + {* cache.name *} {* cache.disk_path *},{* cache.cache_levels *}; + {% end %} + {% end %} + } + {% end %} + server { {% for _, item in ipairs(node_listen) do %} listen {* item.ip *}:{* item.port *} default_server {% if item.enable_http2 then %} http2 {% end %} {% if enable_reuseport then %} reuseport {% end %}; @@ -593,7 +588,7 @@ http { listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol; {% end %} {% if proxy_protocol and proxy_protocol.listen_https_port then %} - listen {* proxy_protocol.listen_https_port *} ssl default_server {% if ssl.enable_http2 then %} http2 {% end %} proxy_protocol; + listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol; {% end %} server_name _; @@ -631,27 +626,6 @@ http { stub_status; } - {% if enable_admin and not admin_server_addr then %} - location /apisix/admin { - set $upstream_scheme 'http'; - set $upstream_host $http_host; - set $upstream_uri ''; - - {%if allow_admin then%} - {% for _, allow_ip in ipairs(allow_admin) do %} - allow {*allow_ip*}; - {% end %} - deny all; - {%else%} - allow all; - {%end%} - - content_by_lua_block { - apisix.http_admin() - } - } - {% end %} - {% if ssl.enable then %} ssl_certificate_by_lua_block { apisix.http_ssl_phase() @@ -852,6 +826,8 @@ http { } } } + {% end %} + # http end configuration snippet starts {% if http_end_configuration_snippet then %} {* http_end_configuration_snippet *} diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua index 
1e27d9a206d5..320efb15f3c4 100644 --- a/apisix/cli/ops.lua +++ b/apisix/cli/ops.lua @@ -66,6 +66,7 @@ stop: stop the apisix server quit: stop the apisix server gracefully restart: restart the apisix server reload: reload the apisix server +test: test the generated nginx.conf version: print the version of apisix ]]) end @@ -234,16 +235,11 @@ Please modify "admin_key" in conf/config.yaml . util.die("can not find openresty\n") end - local need_ver = "1.17.8" + local need_ver = "1.19.3" if not version_greater_equal(or_ver, need_ver) then util.die("openresty version must >=", need_ver, " current ", or_ver, "\n") end - local use_openresty_1_17 = false - if not version_greater_equal(or_ver, "1.19.3") then - use_openresty_1_17 = true - end - local or_info = util.execute_cmd("openresty -V 2>&1") if or_info and not or_info:find("http_stub_status_module", 1, true) then util.die("'http_stub_status_module' module is missing in ", @@ -319,14 +315,10 @@ Please modify "admin_key" in conf/config.yaml . -- listen in admin use a separate port, support specific IP, compatible with the original style local admin_server_addr if yaml_conf.apisix.enable_admin then - if yaml_conf.apisix.admin_listen then - admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", - yaml_conf.apisix.admin_listen.ip, - 9180, yaml_conf.apisix.admin_listen.port) - elseif yaml_conf.apisix.port_admin then - admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", nil, - 9180, yaml_conf.apisix.port_admin) - end + local ip = yaml_conf.apisix.admin_listen.ip + local port = yaml_conf.apisix.admin_listen.port + admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", ip, + 9180, port) end local control_server_addr @@ -432,46 +424,28 @@ Please modify "admin_key" in conf/config.yaml . 
local ssl_listen = {} -- listen in https, support multiple ports, support specific IP for _, value in ipairs(yaml_conf.apisix.ssl.listen) do - if type(value) == "number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", value, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - elseif type(value) == "table" then - local ip = value.ip - local port = value.port - local enable_ipv6 = false - local enable_http2 = (value.enable_http2 or yaml_conf.apisix.ssl.enable_http2) - - if ip == nil then - ip = "0.0.0.0" - if yaml_conf.apisix.enable_ipv6 then - enable_ipv6 = true - end - end - - if port == nil then - port = 9443 - end - - if enable_http2 == nil then - enable_http2 = false + local ip = value.ip + local port = value.port + local enable_ipv6 = false + local enable_http2 = value.enable_http2 + + if ip == nil then + ip = "0.0.0.0" + if yaml_conf.apisix.enable_ipv6 then + enable_ipv6 = true end + end - listen_table_insert(ssl_listen, "https", ip, port, - enable_http2, enable_ipv6) + if port == nil then + port = 9443 end - end - -- listen in https, compatible with the original style - if type(yaml_conf.apisix.ssl.listen_port) == "number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", yaml_conf.apisix.ssl.listen_port, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - elseif type(yaml_conf.apisix.ssl.listen_port) == "table" then - for _, value in ipairs(yaml_conf.apisix.ssl.listen_port) do - if type(value) == "number" then - listen_table_insert(ssl_listen, "https", "0.0.0.0", value, - yaml_conf.apisix.ssl.enable_http2, yaml_conf.apisix.enable_ipv6) - end + if enable_http2 == nil then + enable_http2 = false end + + listen_table_insert(ssl_listen, "https", ip, port, + enable_http2, enable_ipv6) end yaml_conf.apisix.ssl.listen = ssl_listen @@ -539,15 +513,28 @@ Please modify "admin_key" in conf/config.yaml . 
proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout end - local conf_server = snippet.generate_conf_server(yaml_conf) + local conf_server, err = snippet.generate_conf_server(env, yaml_conf) + if err then + util.die(err, "\n") + end + + if yaml_conf.deployment and yaml_conf.deployment.role then + local role = yaml_conf.deployment.role + env.deployment_role = role + + if role == "control_plane" and not admin_server_addr then + local listen = node_listen[1] + admin_server_addr = str_format("%s:%s", listen.ip, listen.port) + end + end -- Using template.render local sys_conf = { - use_openresty_1_17 = use_openresty_1_17, lua_path = env.pkg_path_org, lua_cpath = env.pkg_cpath_org, os_name = util.trim(util.execute_cmd("uname")), apisix_lua_home = env.apisix_home, + deployment_role = env.deployment_role, use_apisix_openresty = use_apisix_openresty, error_log = {level = "warn"}, enable_http = enable_http, @@ -642,11 +629,6 @@ Please modify "admin_key" in conf/config.yaml . sys_conf["worker_processes"] = floor(tonumber(env_worker_processes)) end - if sys_conf["http"]["lua_shared_dicts"] then - stderr:write("lua_shared_dicts is deprecated, " .. - "use custom_lua_shared_dict instead\n") - end - local exported_vars = file.get_exported_vars() if exported_vars then if not sys_conf["envs"] then @@ -806,7 +788,10 @@ local function start(env, ...) 
end init(env) - init_etcd(env, args) + + if env.deployment_role ~= "data_plane" then + init_etcd(env, args) + end util.execute_cmd(env.openresty_args) end diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua index ab053a0a727e..dbf83ef78c28 100644 --- a/apisix/cli/schema.lua +++ b/apisix/cli/schema.lua @@ -43,18 +43,24 @@ local etcd_schema = { key = { type = "string", }, - } + }, }, prefix = { type = "string", - pattern = [[^/[^/]+$]] }, host = { type = "array", items = { type = "string", pattern = [[^https?://]] - } + }, + minItems = 1, + }, + timeout = { + type = "integer", + default = 30, + minimum = 1, + description = "etcd connection timeout in seconds", } }, required = {"prefix", "host"} @@ -127,12 +133,19 @@ local config_schema = { } } }, - port_admin = { - type = "integer", - }, https_admin = { type = "boolean", }, + admin_listen = { + properties = { + listen = { type = "string" }, + port = { type = "integer" }, + }, + default = { + listen = "0.0.0.0", + port = 9180, + } + }, stream_proxy = { type = "object", properties = { @@ -202,6 +215,25 @@ local config_schema = { properties = { ssl_trusted_certificate = { type = "string", + }, + listen = { + type = "array", + items = { + type = "object", + properties = { + ip = { + type = "string", + }, + port = { + type = "integer", + minimum = 1, + maximum = 65535 + }, + enable_http2 = { + type = "boolean", + } + } + } } } }, @@ -271,9 +303,82 @@ local deployment_schema = { traditional = { properties = { etcd = etcd_schema, + role_traditional = { + properties = { + config_provider = { + enum = {"etcd"} + }, + }, + required = {"config_provider"} + } }, required = {"etcd"} }, + control_plane = { + properties = { + etcd = etcd_schema, + role_control_plane = { + properties = { + config_provider = { + enum = {"etcd"} + }, + conf_server = { + properties = { + listen = { + type = "string", + default = "0.0.0.0:9280", + }, + cert = { type = "string" }, + cert_key = { type = "string" }, + client_ca_cert = { type = 
"string" }, + }, + required = {"cert", "cert_key"} + }, + }, + required = {"config_provider", "conf_server"} + }, + certs = { + properties = { + cert = { type = "string" }, + cert_key = { type = "string" }, + trusted_ca_cert = { type = "string" }, + }, + dependencies = { + cert = { + required = {"cert_key"}, + }, + }, + default = {}, + }, + }, + required = {"etcd", "role_control_plane"} + }, + data_plane = { + properties = { + role_data_plane = { + properties = { + config_provider = { + enum = {"control_plane", "yaml"} + }, + }, + required = {"config_provider"} + }, + certs = { + properties = { + cert = { type = "string" }, + cert_key = { type = "string" }, + trusted_ca_cert = { type = "string" }, + }, + dependencies = { + cert = { + required = {"cert_key"}, + }, + }, + default = {}, + }, + }, + required = {"role_data_plane"} + } } diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua index 014719511faa..3b5eb3232394 100644 --- a/apisix/cli/snippet.lua +++ b/apisix/cli/snippet.lua @@ -15,6 +15,7 @@ -- limitations under the License. 
-- local template = require("resty.template") +local pl_path = require("pl.path") local ipairs = ipairs @@ -22,42 +23,158 @@ local ipairs = ipairs local _M = {} -function _M.generate_conf_server(conf) - if not (conf.deployment and conf.deployment.role == "traditional") then - return nil +function _M.generate_conf_server(env, conf) + if not (conf.deployment and ( + conf.deployment.role == "traditional" or + conf.deployment.role == "control_plane")) + then + return nil, nil end -- we use proxy even the role is traditional so that we can test the proxy in daily dev - local servers = conf.deployment.etcd.host + local etcd = conf.deployment.etcd + local servers = etcd.host + local enable_https = false + local prefix = "https://" + if servers[1]:find(prefix, 1, true) then + enable_https = true + end + for i, s in ipairs(servers) do - local prefix = "http://" - -- TODO: support https - if s:find(prefix, 1, true) then - servers[i] = s:sub(#prefix + 1) + if (s:find(prefix, 1, true) ~= nil) ~= enable_https then + return nil, "all nodes in the etcd cluster should enable/disable TLS together" + end + + local _, to = s:find("://", 1, true) + if not to then + return nil, "bad etcd endpoint format" + end + end + + local control_plane + if conf.deployment.role == "control_plane" then + control_plane = conf.deployment.role_control_plane.conf_server + control_plane.cert = pl_path.abspath(control_plane.cert) + control_plane.cert_key = pl_path.abspath(control_plane.cert_key) + + if control_plane.client_ca_cert then + control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert) + end + end + + local trusted_ca_cert + if conf.deployment.certs then + if conf.deployment.certs.trusted_ca_cert then + trusted_ca_cert = pl_path.abspath(conf.deployment.certs.trusted_ca_cert) end end local conf_render = template.compile([[ upstream apisix_conf_backend { - {% for _, addr in ipairs(servers) do %} - server {* addr *}; - {% end %} + server 0.0.0.0:80; + balancer_by_lua_block { + 
local conf_server = require("apisix.conf_server") + conf_server.balancer() + } } + + {% if trusted_ca_cert then %} + lua_ssl_trusted_certificate {* trusted_ca_cert *}; + {% end %} + server { - listen unix:./conf/config_listen.sock; + {% if control_plane then %} + listen {* control_plane.listen *} ssl; + ssl_certificate {* control_plane.cert *}; + ssl_certificate_key {* control_plane.cert_key *}; + + {% if control_plane.client_ca_cert then %} + ssl_verify_client on; + ssl_client_certificate {* control_plane.client_ca_cert *}; + {% end %} + + {% else %} + listen unix:{* home *}/conf/config_listen.sock; + {% end %} + access_log off; + + set $upstream_host ''; + + access_by_lua_block { + local conf_server = require("apisix.conf_server") + conf_server.access() + } + location / { - set $upstream_scheme 'http'; + {% if enable_https then %} + proxy_pass https://apisix_conf_backend; + proxy_ssl_protocols TLSv1.2 TLSv1.3; + proxy_ssl_server_name on; + + {% if etcd_tls_verify then %} + proxy_ssl_verify on; + proxy_ssl_trusted_certificate {* ssl_trusted_certificate *}; + {% end %} + + {% if sni then %} + proxy_ssl_name {* sni *}; + {% else %} + proxy_ssl_name $upstream_host; + {% end %} + + {% if client_cert then %} + proxy_ssl_certificate {* client_cert *}; + proxy_ssl_certificate_key {* client_cert_key *}; + {% end %} - proxy_pass $upstream_scheme://apisix_conf_backend; + {% else %} + proxy_pass http://apisix_conf_backend; + {% end %} proxy_http_version 1.1; proxy_set_header Connection ""; + proxy_set_header Host $upstream_host; + proxy_next_upstream error timeout non_idempotent http_500 http_502 http_503 http_504; + } + + log_by_lua_block { + local conf_server = require("apisix.conf_server") + conf_server.log() } } ]]) + + local tls = etcd.tls + local client_cert + local client_cert_key + local ssl_trusted_certificate + local etcd_tls_verify + if tls then + if tls.cert then + client_cert = pl_path.abspath(tls.cert) + client_cert_key = pl_path.abspath(tls.key) + end + + 
etcd_tls_verify = tls.verify + if enable_https and etcd_tls_verify then + if not conf.apisix.ssl.ssl_trusted_certificate then + return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled" + end + ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate) + end + end + return conf_render({ - servers = servers + sni = tls and tls.sni, + home = env.apisix_home or ".", + control_plane = control_plane, + enable_https = enable_https, + client_cert = client_cert, + client_cert_key = client_cert_key, + trusted_ca_cert = trusted_ca_cert, + etcd_tls_verify = etcd_tls_verify, + ssl_trusted_certificate = ssl_trusted_certificate, }) end diff --git a/apisix/conf_server.lua b/apisix/conf_server.lua new file mode 100644 index 000000000000..e0ea91e77013 --- /dev/null +++ b/apisix/conf_server.lua @@ -0,0 +1,299 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local fetch_local_conf = require("apisix.core.config_local").local_conf +local picker = require("apisix.balancer.least_conn") +local balancer = require("ngx.balancer") +local error = error +local ipairs = ipairs +local ngx = ngx +local ngx_shared = ngx.shared +local ngx_var = ngx.var +local tonumber = tonumber + + +local _M = {} +local servers = {} +local resolved_results = {} +local server_picker +local has_domain = false + +local is_http = ngx.config.subsystem == "http" +local health_check_shm_name = "etcd-cluster-health-check" +if not is_http then + health_check_shm_name = health_check_shm_name .. "-stream" +end +-- an endpoint is unhealthy if it is failed for HEALTH_CHECK_MAX_FAILURE times in +-- HEALTH_CHECK_DURATION_SECOND +local HEALTH_CHECK_MAX_FAILURE = 3 +local HEALTH_CHECK_DURATION_SECOND = 10 + + +local function create_resolved_result(server) + local host, port = core.utils.parse_addr(server) + return { + host = host, + port = port, + server = server, + } +end + + +function _M.init() + local conf = fetch_local_conf() + if not (conf.deployment and conf.deployment.etcd) then + return + end + + local etcd = conf.deployment.etcd + if etcd.health_check_timeout then + HEALTH_CHECK_DURATION_SECOND = etcd.health_check_timeout + end + + for i, s in ipairs(etcd.host) do + local _, to = core.string.find(s, "://") + if not to then + error("bad etcd endpoint format") + end + + local addr = s:sub(to + 1) + local host, _, err = core.utils.parse_addr(addr) + if err then + error("failed to parse host: ".. 
err) + end + + resolved_results[i] = create_resolved_result(addr) + servers[i] = addr + + if not core.utils.parse_ipv4(host) and not core.utils.parse_ipv6(host) then + has_domain = true + resolved_results[i].domain = host + end + end + + if #servers > 1 then + local nodes = {} + for _, s in ipairs(servers) do + nodes[s] = 1 + end + server_picker = picker.new(nodes, {}) + end +end + + +local function response_err(err) + core.log.error("failure in conf server: ", err) + + if ngx.get_phase() == "balancer" then + return + end + + ngx.status = 503 + ngx.say(core.json.encode({error = err})) + ngx.exit(0) +end + + +local function resolve_servers(ctx) + if not has_domain then + return + end + + local changed = false + for _, res in ipairs(resolved_results) do + local domain = res.domain + if not domain then + goto CONTINUE + end + + local ip, err = core.resolver.parse_domain(domain) + if ip and res.host ~= ip then + res.host = ip + changed = true + core.log.info(domain, " is resolved to: ", ip) + end + + if err then + core.log.error("dns resolver resolves domain: ", domain, " error: ", err) + end + + ::CONTINUE:: + end + + if not changed then + return + end + + if #servers > 1 then + local nodes = {} + for _, res in ipairs(resolved_results) do + local s = res.server + nodes[s] = 1 + end + server_picker = picker.new(nodes, {}) + end +end + + +local function gen_unhealthy_key(addr) + return "conf_server:" .. 
addr +end + + +local function is_node_health(addr) + local key = gen_unhealthy_key(addr) + local count, err = ngx_shared[health_check_shm_name]:get(key) + if err then + core.log.warn("failed to get health check count, key: ", key, " err: ", err) + return true + end + + if not count then + return true + end + + return tonumber(count) < HEALTH_CHECK_MAX_FAILURE +end + + +local function report_failure(addr) + local key = gen_unhealthy_key(addr) + local count, err = + ngx_shared[health_check_shm_name]:incr(key, 1, 0, HEALTH_CHECK_DURATION_SECOND) + if not count then + core.log.error("failed to report failure, key: ", key, " err: ", err) + else + -- count might be larger than HEALTH_CHECK_MAX_FAILURE + core.log.warn("report failure, endpoint: ", addr, " count: ", count) + end +end + + +local function pick_node_by_server_picker(ctx) + local server, err = ctx.server_picker.get(ctx) + if not server then + err = err or "no valid upstream node" + return nil, "failed to find valid upstream server: " .. err + end + + ctx.balancer_server = server + + for _, r in ipairs(resolved_results) do + if r.server == server then + return r + end + end + + return nil, "unknown server: " .. 
server +end + + +local function pick_node(ctx) + local res + if server_picker then + if not ctx.server_picker then + ctx.server_picker = server_picker + end + + local err + res, err = pick_node_by_server_picker(ctx) + if not res then + return nil, err + end + + while not is_node_health(res.server) do + core.log.warn("endpoint ", res.server, " is unhealthy, skipped") + + if server_picker.after_balance then + server_picker.after_balance(ctx, true) + end + + res, err = pick_node_by_server_picker(ctx) + if not res then + return nil, err + end + end + + else + -- we don't do health check if there is only one candidate + res = resolved_results[1] + end + + ctx.balancer_ip = res.host + ctx.balancer_port = res.port + + ngx_var.upstream_host = res.domain or res.host + if ngx.get_phase() == "balancer" then + balancer.recreate_request() + end + + return true +end + + +function _M.access() + local ctx = ngx.ctx + -- Nginx's DNS resolver doesn't support search option, + -- so we have to use our own resolver + resolve_servers(ctx) + local ok, err = pick_node(ctx) + if not ok then + return response_err(err) + end +end + + +function _M.balancer() + local ctx = ngx.ctx + if not ctx.balancer_run then + ctx.balancer_run = true + local retries = #servers - 1 + local ok, err = balancer.set_more_tries(retries) + if not ok then + core.log.error("could not set upstream retries: ", err) + elseif err then + core.log.warn("could not set upstream retries: ", err) + end + else + if ctx.server_picker and ctx.server_picker.after_balance then + ctx.server_picker.after_balance(ctx, true) + end + + report_failure(ctx.balancer_server) + + local ok, err = pick_node(ctx) + if not ok then + return response_err(err) + end + end + + local ok, err = balancer.set_current_peer(ctx.balancer_ip, ctx.balancer_port) + if not ok then + return response_err(err) + end +end + + +function _M.log() + local ctx = ngx.ctx + if ctx.server_picker and ctx.server_picker.after_balance then + 
ctx.server_picker.after_balance(ctx, false) + end +end + + +return _M diff --git a/apisix/constants.lua b/apisix/constants.lua index cf04e890cc8c..1c82ec3d49cd 100644 --- a/apisix/constants.lua +++ b/apisix/constants.lua @@ -23,20 +23,20 @@ return { HTTP_ETCD_DIRECTORY = { ["/upstreams"] = true, ["/plugins"] = true, - ["/ssl"] = true, + ["/ssls"] = true, ["/stream_routes"] = true, ["/plugin_metadata"] = true, ["/routes"] = true, ["/services"] = true, ["/consumers"] = true, ["/global_rules"] = true, - ["/proto"] = true, + ["/protos"] = true, ["/plugin_configs"] = true, }, STREAM_ETCD_DIRECTORY = { ["/upstreams"] = true, ["/plugins"] = true, - ["/ssl"] = true, + ["/ssls"] = true, ["/stream_routes"] = true, ["/plugin_metadata"] = true, }, diff --git a/apisix/control/v1.lua b/apisix/control/v1.lua index bbe457cd607f..c6d1e065041f 100644 --- a/apisix/control/v1.lua +++ b/apisix/control/v1.lua @@ -276,6 +276,28 @@ function _M.dump_service_info() return 200, info end +function _M.dump_all_plugin_metadata() + local names = core.config.local_conf().plugins + local metadatas = core.table.new(0, #names) + for _, name in ipairs(names) do + local metadata = plugin.plugin_metadata(name) + if metadata then + core.table.insert(metadatas, metadata.value) + end + end + return 200, metadatas +end + +function _M.dump_plugin_metadata() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local name = uri_segs[4] + local metadata = plugin.plugin_metadata(name) + if not metadata then + return 404, {error_msg = str_format("plugin metadata[%s] not found", name)} + end + return 200, metadata.value +end + return { -- /v1/schema @@ -337,5 +359,17 @@ return { methods = {"GET"}, uris = {"/upstream/*"}, handler = _M.dump_upstream_info, + }, + -- /v1/plugin_metadatas + { + methods = {"GET"}, + uris = {"/plugin_metadatas"}, + handler = _M.dump_all_plugin_metadata, + }, + -- /v1/plugin_metadata/* + { + methods = {"GET"}, + uris = {"/plugin_metadata/*"}, + handler = _M.dump_plugin_metadata, } } 
diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index 8736059f7bf5..e432b05d950d 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -21,6 +21,7 @@ local table = require("apisix.core.table") local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local log = require("apisix.core.log") local json = require("apisix.core.json") local etcd_apisix = require("apisix.core.etcd") @@ -212,14 +213,17 @@ local function load_full_data(self, dir_res, headers) self:upgrade_version(item.modifiedIndex) else - if not dir_res.nodes then - dir_res.nodes = {} + -- here dir_res maybe res.body.node or res.body.list + -- we need make values equals to res.body.node.nodes or res.body.list + local values = (dir_res and dir_res.nodes) or dir_res + if not values then + values = {} end - self.values = new_tab(#dir_res.nodes, 0) - self.values_hash = new_tab(0, #dir_res.nodes) + self.values = new_tab(#values, 0) + self.values_hash = new_tab(0, #values) - for _, item in ipairs(dir_res.nodes) do + for _, item in ipairs(values) do local key = short_key(self, item.key) local data_valid = true if type(item.value) ~= "table" then @@ -302,7 +306,7 @@ local function sync_data(self) return false, err end - local dir_res, headers = res.body.node or {}, res.headers + local dir_res, headers = res.body.list or {}, res.headers log.debug("readdir key: ", self.key, " res: ", json.delay_encode(dir_res)) if not dir_res then @@ -311,12 +315,7 @@ local function sync_data(self) if self.values then for i, val in ipairs(self.values) do - if val and val.clean_handlers then - for _, clean_handler in ipairs(val.clean_handlers) do - clean_handler(val) - end - val.clean_handlers = nil - end + config_util.fire_all_clean_handlers(val) end self.values = nil @@ -403,11 +402,8 @@ local function sync_data(self) local pre_index = self.values_hash[key] if pre_index then local pre_val = self.values[pre_index] - if pre_val and 
pre_val.clean_handlers then - for _, clean_handler in ipairs(pre_val.clean_handlers) do - clean_handler(pre_val) - end - pre_val.clean_handlers = nil + if pre_val then + config_util.fire_all_clean_handlers(pre_val) end if res.value then @@ -511,7 +507,7 @@ do end local err - etcd_cli, err = etcd_apisix.new() + etcd_cli, err = etcd_apisix.switch_proxy() return etcd_cli, err end end @@ -523,6 +519,7 @@ local function _automatic_fetch(premature, self) end if not (health_check.conf and health_check.conf.shm_name) then + -- used for worker processes to synchronize configuration local _, err = health_check.init({ shm_name = health_check_shm_name, fail_timeout = self.health_check_timeout, @@ -811,18 +808,13 @@ function _M.init() return true end - local etcd_cli, err = get_etcd() + -- don't go through proxy during start because the proxy is not available + local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() if not etcd_cli then return nil, "failed to start a etcd instance: " .. err end - -- don't go through proxy during start because the proxy is not available - local proxy = etcd_cli.unix_socket_proxy - etcd_cli.unix_socket_proxy = nil - local etcd_conf = local_conf.etcd - local prefix = etcd_conf.prefix local res, err = readdir(etcd_cli, prefix, create_formatter(prefix)) - etcd_cli.unix_socket_proxy = proxy if not res then return nil, err end diff --git a/apisix/core/config_util.lua b/apisix/core/config_util.lua index 8a2ce7b57b7b..b3fb13b7cce5 100644 --- a/apisix/core/config_util.lua +++ b/apisix/core/config_util.lua @@ -20,8 +20,10 @@ -- @module core.config_util local core_tab = require("apisix.core.table") +local log = require("apisix.core.log") local str_byte = string.byte local str_char = string.char +local ipairs = ipairs local setmetatable = setmetatable local tostring = tostring local type = type @@ -56,23 +58,56 @@ end -- or cancelled. Note that Nginx worker exit doesn't trigger the clean handler. -- Return an index so that we can cancel it later. 
function _M.add_clean_handler(item, func) - local idx = #item.clean_handlers + 1 - item.clean_handlers[idx] = func - return idx + if not item.clean_handlers._id then + item.clean_handlers._id = 1 + end + + local id = item.clean_handlers._id + item.clean_handlers._id = item.clean_handlers._id + 1 + core_tab.insert(item.clean_handlers, {f = func, id = id}) + return id end -- cancel a clean handler added by add_clean_handler. -- If `fire` is true, call the clean handler. function _M.cancel_clean_handler(item, idx, fire) - local f = item.clean_handlers[idx] - core_tab.remove(item.clean_handlers, idx) + local pos, f + -- the number of pending clean handler is small so we can cancel them in O(n) + for i, clean_handler in ipairs(item.clean_handlers) do + if clean_handler.id == idx then + pos = i + f = clean_handler.f + break + end + end + + if not pos then + log.error("failed to find clean_handler with idx ", idx) + return + end + + core_tab.remove(item.clean_handlers, pos) if fire then f(item) end end +-- fire all clean handlers added by add_clean_handler. +function _M.fire_all_clean_handlers(item) + if not item.clean_handlers then + return + end + + for _, clean_handler in ipairs(item.clean_handlers) do + clean_handler.f(item) + end + + item.clean_handlers = nil +end + + --- -- Convert different time units to seconds as time units. 
-- Time intervals can be specified in milliseconds, seconds, minutes, hours, days and so on, diff --git a/apisix/core/config_xds.lua b/apisix/core/config_xds.lua index e5e452f7eec6..bdb45206a917 100644 --- a/apisix/core/config_xds.lua +++ b/apisix/core/config_xds.lua @@ -20,9 +20,11 @@ -- @module core.config_xds local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local string = require("apisix.core.string") local log = require("apisix.core.log") local json = require("apisix.core.json") +local os = require("apisix.core.os") local ngx_sleep = require("apisix.core.utils").sleep local check_schema = require("apisix.core.schema").check local new_tab = require("table.new") @@ -67,10 +69,7 @@ end ffi.cdef[[ -typedef unsigned int useconds_t; - extern void initial(void* config_zone, void* version_zone); -int usleep(useconds_t usec); ]] local created_obj = {} @@ -153,12 +152,7 @@ local function sync_data(self) if self.values then for _, val in ipairs(self.values) do - if val and val.clean_handlers then - for _, clean_handler in ipairs(val.clean_handlers) do - clean_handler(val) - end - val.clean_handlers = nil - end + config_util.fire_all_clean_handlers(val) end self.values = nil self.values_hash = nil @@ -323,7 +317,7 @@ function _M.new(key, opts) -- blocking until xds completes initial configuration while true do - C.usleep(0.1) + os.usleep(1000) fetch_version() if latest_version then break diff --git a/apisix/core/config_yaml.lua b/apisix/core/config_yaml.lua index 24a5ff57aa6f..0c6564caf6bb 100644 --- a/apisix/core/config_yaml.lua +++ b/apisix/core/config_yaml.lua @@ -20,6 +20,7 @@ -- @module core.config_yaml local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") local yaml = require("tinyyaml") local log = require("apisix.core.log") local json = require("apisix.core.json") @@ -142,12 +143,7 @@ local function sync_data(self) if self.values then for _, 
item in ipairs(self.values) do - if item.clean_handlers then - for _, clean_handler in ipairs(item.clean_handlers) do - clean_handler(item) - end - item.clean_handlers = nil - end + config_util.fire_all_clean_handlers(item) end self.values = nil end diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua index 9d289bd5d6e5..f6b1ddbd5cc4 100644 --- a/apisix/core/etcd.lua +++ b/apisix/core/etcd.lua @@ -19,26 +19,32 @@ -- -- @module core.etcd -local fetch_local_conf = require("apisix.core.config_local").local_conf -local array_mt = require("apisix.core.json").array_mt -local etcd = require("resty.etcd") -local clone_tab = require("table.clone") -local health_check = require("resty.etcd.health_check") -local ipairs = ipairs -local setmetatable = setmetatable -local string = string -local tonumber = tonumber +local fetch_local_conf = require("apisix.core.config_local").local_conf +local array_mt = require("apisix.core.json").array_mt +local v3_adapter = require("apisix.admin.v3_adapter") +local etcd = require("resty.etcd") +local clone_tab = require("table.clone") +local health_check = require("resty.etcd.health_check") +local ipairs = ipairs +local setmetatable = setmetatable +local string = string +local tonumber = tonumber +local ngx_config_prefix = ngx.config.prefix() +local ngx_socket_tcp = ngx.socket.tcp +local ngx_get_phase = ngx.get_phase + + +local is_http = ngx.config.subsystem == "http" local _M = {} --- this function create the etcd client instance used in the Admin API -local function new() - local local_conf, err = fetch_local_conf() - if not local_conf then - return nil, nil, err - end +local function has_mtls_support() + local s = ngx_socket_tcp() + return s.tlshandshake ~= nil +end - local etcd_conf = clone_tab(local_conf.etcd) + +local function _new(etcd_conf) local prefix = etcd_conf.prefix etcd_conf.http_host = etcd_conf.host etcd_conf.host = nil @@ -63,24 +69,129 @@ local function new() end end - -- enable etcd health check retry for curr worker - 
if not health_check.conf then + local etcd_cli, err = etcd.new(etcd_conf) + if not etcd_cli then + return nil, nil, err + end + + return etcd_cli, prefix +end + + +local function new() + local local_conf, err = fetch_local_conf() + if not local_conf then + return nil, nil, err + end + + local etcd_conf = clone_tab(local_conf.etcd) + local proxy_by_conf_server = false + + if local_conf.deployment then + if local_conf.deployment.role == "traditional" + -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in + -- daily development. However, a stream proxy can't be the CP. + -- Hence, generate a HTTP conf server to proxy etcd requests in stream proxy is + -- unnecessary and inefficient. + and is_http + then + local sock_prefix = ngx_config_prefix + etcd_conf.unix_socket_proxy = + "unix:" .. sock_prefix .. "/conf/config_listen.sock" + etcd_conf.host = {"http://127.0.0.1:2379"} + proxy_by_conf_server = true + + elseif local_conf.deployment.role == "control_plane" then + local addr = local_conf.deployment.role_control_plane.conf_server.listen + etcd_conf.host = {"https://" .. 
addr} + etcd_conf.tls = { + verify = false, + } + + if has_mtls_support() and local_conf.deployment.certs.cert then + local cert = local_conf.deployment.certs.cert + local cert_key = local_conf.deployment.certs.cert_key + etcd_conf.tls.cert = cert + etcd_conf.tls.key = cert_key + end + + proxy_by_conf_server = true + + elseif local_conf.deployment.role == "data_plane" then + if has_mtls_support() and local_conf.deployment.certs.cert then + local cert = local_conf.deployment.certs.cert + local cert_key = local_conf.deployment.certs.cert_key + + if not etcd_conf.tls then + etcd_conf.tls = {} + end + + etcd_conf.tls.cert = cert + etcd_conf.tls.key = cert_key + end + end + end + + -- if an unhealthy etcd node is selected in a single admin read/write etcd operation, + -- the retry mechanism for health check can select another healthy etcd node + -- to complete the read/write etcd operation. + if proxy_by_conf_server then + -- health check is done in conf server + health_check.disable() + elseif not health_check.conf then health_check.init({ - max_fails = #etcd_conf.http_host, + max_fails = 1, retry = true, }) end - local etcd_cli - etcd_cli, err = etcd.new(etcd_conf) - if not etcd_cli then + return _new(etcd_conf) +end +_M.new = new + + +--- +-- Create an etcd client which will connect to etcd without being proxyed by conf server. +-- This method is used in init_worker phase when the conf server is not ready. +-- +-- @function core.etcd.new_without_proxy +-- @treturn table|nil the etcd client, or nil if failed. +-- @treturn string|nil the configured prefix of etcd keys, or nil if failed. +-- @treturn nil|string the error message. 
+local function new_without_proxy() + local local_conf, err = fetch_local_conf() + if not local_conf then return nil, nil, err end - return etcd_cli, prefix + local etcd_conf = clone_tab(local_conf.etcd) + return _new(etcd_conf) end -_M.new = new +_M.new_without_proxy = new_without_proxy + + +local function switch_proxy() + if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then + return new_without_proxy() + end + + local etcd_cli, prefix, err = new() + if not etcd_cli or err then + return etcd_cli, prefix, err + end + if not etcd_cli.unix_socket_proxy then + return etcd_cli, prefix, err + end + local sock = ngx_socket_tcp() + local ok = sock:connect(etcd_cli.unix_socket_proxy) + if not ok then + return new_without_proxy() + end + + return etcd_cli, prefix, err +end +_M.switch_proxy = switch_proxy -- convert ETCD v3 entry to v2 one local function kvs_to_node(kvs) @@ -133,7 +244,7 @@ function _M.get_format(res, real_key, is_dir, formatter) return not_found(res) end - res.body.action = "get" + v3_adapter.to_v3(res.body, "get") if formatter then return formatter(res) @@ -161,6 +272,7 @@ function _M.get_format(res, real_key, is_dir, formatter) end res.body.kvs = nil + v3_adapter.to_v3_list(res.body) return res end @@ -194,7 +306,7 @@ end function _M.get(key, is_dir) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -213,7 +325,7 @@ end local function set(key, value, ttl) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -234,10 +346,14 @@ local function set(key, value, ttl) return nil, err end + if res.body.error then + return nil, res.body.error + end + res.headers["X-Etcd-Index"] = res.body.header.revision -- etcd v3 set would not return kv info - res.body.action = "set" + v3_adapter.to_v3(res.body, "set") res.body.node = {} res.body.node.key = prefix .. 
key res.body.node.value = value @@ -253,7 +369,7 @@ _M.set = set function _M.atomic_set(key, value, ttl, mod_revision) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -300,7 +416,7 @@ function _M.atomic_set(key, value, ttl, mod_revision) res.headers["X-Etcd-Index"] = res.body.header.revision -- etcd v3 set would not return kv info - res.body.action = "compareAndSwap" + v3_adapter.to_v3(res.body, "compareAndSwap") res.body.node = { key = key, value = value, @@ -312,7 +428,7 @@ end function _M.push(key, value, ttl) - local etcd_cli, _, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end @@ -338,13 +454,13 @@ function _M.push(key, value, ttl) return nil, err end - res.body.action = "create" + v3_adapter.to_v3(res.body, "create") return res, nil end function _M.delete(key) - local etcd_cli, prefix, err = new() + local etcd_cli, prefix, err = switch_proxy() if not etcd_cli then return nil, err end @@ -362,7 +478,7 @@ function _M.delete(key) end -- etcd v3 set would not return kv info - res.body.action = "delete" + v3_adapter.to_v3(res.body, "delete") res.body.node = {} res.body.key = prefix .. 
key @@ -382,7 +498,7 @@ end -- -- etcdserver = "3.5.0" -- -- } function _M.server_version() - local etcd_cli, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end @@ -392,7 +508,7 @@ end function _M.keepalive(id) - local etcd_cli, _, err = new() + local etcd_cli, _, err = switch_proxy() if not etcd_cli then return nil, err end diff --git a/apisix/core/os.lua b/apisix/core/os.lua index ae721e883435..4a922d01e43d 100644 --- a/apisix/core/os.lua +++ b/apisix/core/os.lua @@ -23,6 +23,9 @@ local ffi = require("ffi") local ffi_str = ffi.string local ffi_errno = ffi.errno local C = ffi.C +local ceil = math.ceil +local floor = math.floor +local error = error local tostring = tostring local type = type @@ -71,6 +74,20 @@ function _M.setenv(name, value) end +--- +-- sleep blockingly in microseconds +-- +-- @function core.os.usleep +-- @tparam number us The number of microseconds. +local function usleep(us) + if ceil(us) ~= floor(us) then + error("bad microseconds: " .. us) + end + C.usleep(us) +end +_M.usleep = usleep + + local function waitpid_nohang(pid) local res = C.waitpid(pid, nil, WNOHANG) if res == -1 then @@ -86,7 +103,7 @@ function _M.waitpid(pid, timeout) local total = timeout * 1000 * 1000 while step * count < total do count = count + 1 - C.usleep(step) + usleep(step) local ok, err = waitpid_nohang(pid) if err then return nil, err diff --git a/apisix/core/profile.lua b/apisix/core/profile.lua index b4c4cfaa908f..389a9d42ccec 100644 --- a/apisix/core/profile.lua +++ b/apisix/core/profile.lua @@ -21,7 +21,7 @@ local _M = { version = 0.1, - profile = os.getenv("APISIX_PROFILE"), + profile = os.getenv("APISIX_PROFILE") or "", apisix_home = (ngx and ngx.config.prefix()) or "" } @@ -40,7 +40,7 @@ local _M = { -- local local_conf_path = profile:yaml_path("config") function _M.yaml_path(self, file_name) local file_path = self.apisix_home .. "conf/" .. 
file_name - if self.profile and file_name ~= "config-default" then + if self.profile ~= "" and file_name ~= "config-default" then file_path = file_path .. "-" .. self.profile end diff --git a/apisix/core/pubsub.lua b/apisix/core/pubsub.lua index 798153a5060e..d6bcafad11f4 100644 --- a/apisix/core/pubsub.lua +++ b/apisix/core/pubsub.lua @@ -34,7 +34,7 @@ local mt = { __index = _M } local pb_state local function init_pb_state() -- clear current pb state - pb.state(nil) + local old_pb_state = pb.state(nil) -- set int64 rule for pubsub module pb.option("int64_as_string") @@ -42,19 +42,15 @@ local function init_pb_state() -- initialize protoc compiler protoc.reload() local pubsub_protoc = protoc.new() - - -- compile the protobuf file on initial load module - -- ensure that each worker is loaded once - if not pubsub_protoc.loaded["pubsub.proto"] then - pubsub_protoc:addpath("apisix/include/apisix/model") - local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto") - if not ok then - pubsub_protoc:reset() - return "failed to load pubsub protocol: " .. err - end + pubsub_protoc:addpath("apisix/include/apisix/model") + local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto") + if not ok then + pubsub_protoc:reset() + pb.state(old_pb_state) + return "failed to load pubsub protocol: " .. 
err end - pb_state = pb.state(nil) + pb_state = pb.state(old_pb_state) end diff --git a/apisix/core/table.lua b/apisix/core/table.lua index 2a6bb47ced61..4346863079cf 100644 --- a/apisix/core/table.lua +++ b/apisix/core/table.lua @@ -41,6 +41,7 @@ local _M = { sort = table.sort, clone = require("table.clone"), isarray = require("table.isarray"), + isempty = require("table.isempty"), } @@ -91,6 +92,10 @@ end -- local arr = {"a", "b", "c"} -- local idx = core.table.array_find(arr, "b") -- idx = 2 function _M.array_find(array, val) + if type(array) ~= "table" then + return nil + end + for i, v in ipairs(array) do if v == val then return i diff --git a/apisix/core/version.lua b/apisix/core/version.lua index 3b0e34726fe4..242b667a772a 100644 --- a/apisix/core/version.lua +++ b/apisix/core/version.lua @@ -20,5 +20,5 @@ -- @module core.version return { - VERSION = "2.14.1" + VERSION = "2.15.0" } diff --git a/apisix/discovery/kubernetes/informer_factory.lua b/apisix/discovery/kubernetes/informer_factory.lua index a03f27a5ac68..3dca064039fb 100644 --- a/apisix/discovery/kubernetes/informer_factory.lua +++ b/apisix/discovery/kubernetes/informer_factory.lua @@ -263,6 +263,9 @@ local function list_watch(informer, apiserver) local reason, message local httpc = http.new() + informer.continue = "" + informer.version = "" + informer.fetch_state = "connecting" core.log.info("begin to connect ", apiserver.host, ":", apiserver.port) diff --git a/apisix/init.lua b/apisix/init.lua index af0f22553f1d..80621432c03b 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -27,6 +27,7 @@ require("jit.opt").start("minstitch=2", "maxtrace=4000", require("apisix.patch").patch() local core = require("apisix.core") +local conf_server = require("apisix.conf_server") local plugin = require("apisix.plugin") local plugin_config = require("apisix.plugin_config") local script = require("apisix.script") @@ -40,7 +41,6 @@ local apisix_ssl = require("apisix.ssl") local upstream_util = 
require("apisix.utils.upstream") local xrpc = require("apisix.stream.xrpc") local ctxdump = require("resty.ctxdump") -local ngx_balancer = require("ngx.balancer") local debug = require("apisix.debug") local pubsub_kafka = require("apisix.pubsub.kafka") local ngx = ngx @@ -56,7 +56,6 @@ local str_byte = string.byte local str_sub = string.sub local tonumber = tonumber local pairs = pairs -local type = type local control_api_router local is_http = false @@ -96,6 +95,7 @@ function _M.http_init(args) end xrpc.init() + conf_server.init() end @@ -152,6 +152,9 @@ end function _M.http_exit_worker() + -- TODO: we can support stream plugin later - currently there is not `destory` method + -- in stream plugins + plugin.exit_worker() require("apisix.plugins.ext-plugin.init").exit_worker() end @@ -218,23 +221,16 @@ local function set_upstream_host(api_ctx, picked_server) return end - local nodes_count = up_conf.nodes and #up_conf.nodes or 0 - if nodes_count == 1 or ngx_balancer.recreate_request then - api_ctx.var.upstream_host = picked_server.upstream_host - end + api_ctx.var.upstream_host = picked_server.upstream_host end local function set_upstream_headers(api_ctx, picked_server) set_upstream_host(api_ctx, picked_server) - local hdr = core.request.header(api_ctx, "X-Forwarded-Proto") - if hdr then - if type(hdr) == "table" then - api_ctx.var.var_x_forwarded_proto = hdr[1] - else - api_ctx.var.var_x_forwarded_proto = hdr - end + local proto = api_ctx.var.http_x_forwarded_proto + if proto then + api_ctx.var.var_x_forwarded_proto = proto end end @@ -424,6 +420,10 @@ function _M.http_access_phase() api_ctx.route_id = route.value.id api_ctx.route_name = route.value.name + local ref = ctxdump.stash_ngx_ctx() + core.log.info("stash ngx ctx: ", ref) + ngx_var.ctx_ref = ref + -- run global rule plugin.run_global_rules(api_ctx, router.global_rules, nil) @@ -450,9 +450,10 @@ function _M.http_access_phase() if changed then api_ctx.matched_route = route core.table.clear(api_ctx.plugins) - 
api_ctx.plugins = plugin.filter(api_ctx, route, api_ctx.plugins) + local phase = "rewrite_in_consumer" + api_ctx.plugins = plugin.filter(api_ctx, route, api_ctx.plugins, nil, phase) -- rerun rewrite phase for newly added plugins in consumer - plugin.run_plugin("rewrite_in_consumer", api_ctx.plugins, api_ctx) + plugin.run_plugin(phase, api_ctx.plugins, api_ctx) end end plugin.run_plugin("access", plugins, api_ctx) @@ -526,10 +527,6 @@ function _M.http_access_phase() core.log.info("enabled websocket for route: ", route.value.id) end - if route.value.service_protocol == "grpc" then - api_ctx.upstream_scheme = "grpc" - end - -- load balancer is not required by kafka upstream, so the upstream -- node selection process is intercepted and left to kafka to -- handle on its own @@ -556,10 +553,6 @@ function _M.http_access_phase() -- run the before_proxy method in access phase first to avoid always reinit request common_phase("before_proxy") - local ref = ctxdump.stash_ngx_ctx() - core.log.info("stash ngx ctx: ", ref) - ngx_var.ctx_ref = ref - local up_scheme = api_ctx.upstream_scheme if up_scheme == "grpcs" or up_scheme == "grpc" then return ngx.exec("@grpc_pass") diff --git a/apisix/plugin.lua b/apisix/plugin.lua index 5aad12e8926c..3cf2283a9aae 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -19,6 +19,7 @@ local core = require("apisix.core") local config_util = require("apisix.core.config_util") local enable_debug = require("apisix.debug").enable_debug local wasm = require("apisix.wasm") +local expr = require("resty.expr.v1") local ngx = ngx local crc32 = ngx.crc32_short local ngx_exit = ngx.exit @@ -40,8 +41,11 @@ local stream_local_plugins_hash = core.table.new(0, 32) local merged_route = core.lrucache.new({ ttl = 300, count = 512 }) +local expr_lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) local local_conf - +local check_plugin_metadata local _M = { version = 0.3, @@ -68,6 +72,25 @@ local function sort_plugin(l, r) return l.priority > r.priority 
end +local function custom_sort_plugin(l, r) + return l._meta.priority > r._meta.priority +end + +local function check_disable(plugin_conf) + if not plugin_conf then + return nil + end + + if not plugin_conf._meta then + return nil + end + + if type(plugin_conf._meta) ~= "table" then + return nil + end + + return plugin_conf._meta.disable +end local PLUGIN_TYPE_HTTP = 1 local PLUGIN_TYPE_STREAM = 2 @@ -135,14 +158,6 @@ local function load_plugin(name, plugins_list, plugin_type) local plugin_injected_schema = core.schema.plugin_injected_schema if plugin.schema['$comment'] ~= plugin_injected_schema['$comment'] then - if properties.disable then - core.log.error("invalid plugin [", name, - "]: found forbidden 'disable' field in the schema") - return - end - - properties.disable = plugin_injected_schema.disable - if properties._meta then core.log.error("invalid plugin [", name, "]: found forbidden '_meta' field in the schema") @@ -151,6 +166,8 @@ local function load_plugin(name, plugins_list, plugin_type) properties._meta = plugin_injected_schema._meta -- new injected fields should be added under `_meta` + -- 1. so we won't break user's code when adding any new injected fields + -- 2. 
the semantics is clear, especially in the doc and in the caller side plugin.schema['$comment'] = plugin_injected_schema['$comment'] end @@ -265,7 +282,7 @@ local function load_stream(plugin_names) end -function _M.load(config) +local function get_plugin_names(config) local http_plugin_names local stream_plugin_names @@ -287,7 +304,7 @@ function _M.load(config) local plugins_conf = config.value -- plugins_conf can be nil when another instance writes into etcd key "/apisix/plugins/" if not plugins_conf then - return local_plugins + return true end for _, conf in ipairs(plugins_conf) do @@ -299,6 +316,16 @@ function _M.load(config) end end + return false, http_plugin_names, stream_plugin_names +end + + +function _M.load(config) + local ignored, http_plugin_names, stream_plugin_names = get_plugin_names(config) + if ignored then + return local_plugins + end + if ngx.config.subsystem == "http" then if not http_plugin_names then core.log.error("failed to read plugin list from local file") @@ -329,6 +356,24 @@ function _M.load(config) end +function _M.exit_worker() + for name, plugin in pairs(local_plugins_hash) do + local ty = PLUGIN_TYPE_HTTP + if plugin.type == "wasm" then + ty = PLUGIN_TYPE_HTTP_WASM + end + unload_plugin(name, ty) + end + + -- we need to load stream plugin so that we can check their schemas in + -- Admin API. Maybe we can avoid calling `load` in this case? So that + -- we don't need to call `destroy` too + for name in pairs(stream_local_plugins_hash) do + unload_plugin(name, PLUGIN_TYPE_STREAM) + end +end + + local function trace_plugins_info_for_debug(ctx, plugins) if not enable_debug() then return @@ -364,8 +409,34 @@ local function trace_plugins_info_for_debug(ctx, plugins) end end +local function meta_filter(ctx, plugin_name, plugin_conf) + local filter = plugin_conf._meta and plugin_conf._meta.filter + if not filter then + return true + end + + local ex, ok, err + if ctx then + ex, err = expr_lrucache(plugin_name .. ctx.conf_type .. 
ctx.conf_id, + ctx.conf_version, expr.new, filter) + else + ex, err = expr.new(filter) + end + if not ex then + core.log.warn("failed to get the 'vars' expression: ", err , + " plugin_name: ", plugin_name) + return true + end + ok, err = ex:eval() + if err then + core.log.warn("failed to run the 'vars' expression: ", err, + " plugin_name: ", plugin_name) + return true + end + return ok +end -function _M.filter(ctx, conf, plugins, route_conf) +function _M.filter(ctx, conf, plugins, route_conf, phase) local user_plugin_conf = conf.value.plugins if user_plugin_conf == nil or core.table.nkeys(user_plugin_conf) == 0 then @@ -375,29 +446,85 @@ function _M.filter(ctx, conf, plugins, route_conf) return plugins or core.tablepool.fetch("plugins", 0, 0) end + local custom_sort = false local route_plugin_conf = route_conf and route_conf.value.plugins plugins = plugins or core.tablepool.fetch("plugins", 32, 0) for _, plugin_obj in ipairs(local_plugins) do local name = plugin_obj.name local plugin_conf = user_plugin_conf[name] - if type(plugin_conf) == "table" and not plugin_conf.disable then + if type(plugin_conf) ~= "table" then + goto continue + end + + local matched = meta_filter(ctx, name, plugin_conf) + local disable = check_disable(plugin_conf) + if not disable and matched then if plugin_obj.run_policy == "prefer_route" and route_plugin_conf ~= nil then local plugin_conf_in_route = route_plugin_conf[name] - if plugin_conf_in_route and not plugin_conf_in_route.disable then + local disable_in_route = check_disable(plugin_conf_in_route) + if plugin_conf_in_route and not disable_in_route then goto continue end end + if plugin_conf._meta and plugin_conf._meta.priority then + custom_sort = true + end core.table.insert(plugins, plugin_obj) core.table.insert(plugins, plugin_conf) - - ::continue:: end + + ::continue:: end trace_plugins_info_for_debug(ctx, plugins) + if custom_sort then + local tmp_plugin_objs = core.tablepool.fetch("tmp_plugin_objs", 0, #plugins / 2) + local 
tmp_plugin_confs = core.tablepool.fetch("tmp_plugin_confs", #plugins / 2, 0) + + for i = 1, #plugins, 2 do + local plugin_obj = plugins[i] + local plugin_conf = plugins[i + 1] + + -- in the rewrite phase, the plugin executes in the following order: + -- 1. execute the rewrite phase of the plugins on route(including the auth plugins) + -- 2. merge plugins from consumer and route + -- 3. execute the rewrite phase of the plugins on consumer(phase: rewrite_in_consumer) + -- in this case, we need to skip the plugins that was already executed(step 1) + if phase == "rewrite_in_consumer" and not plugin_conf._from_consumer then + plugin_conf._skip_rewrite_in_consumer = true + end + + tmp_plugin_objs[plugin_conf] = plugin_obj + core.table.insert(tmp_plugin_confs, plugin_conf) + + if not plugin_conf._meta then + plugin_conf._meta = core.table.new(0, 1) + plugin_conf._meta.priority = plugin_obj.priority + else + if not plugin_conf._meta.priority then + plugin_conf._meta.priority = plugin_obj.priority + end + end + end + + sort_tab(tmp_plugin_confs, custom_sort_plugin) + + local index + for i = 1, #tmp_plugin_confs do + index = i * 2 - 1 + local plugin_conf = tmp_plugin_confs[i] + local plugin_obj = tmp_plugin_objs[plugin_conf] + plugins[index] = plugin_obj + plugins[index + 1] = plugin_conf + end + + core.tablepool.release("tmp_plugin_objs", tmp_plugin_objs) + core.tablepool.release("tmp_plugin_confs", tmp_plugin_confs) + end + return plugins end @@ -414,7 +541,8 @@ function _M.stream_filter(user_route, plugins) local name = plugin_obj.name local plugin_conf = user_plugin_conf[name] - if type(plugin_conf) == "table" and not plugin_conf.disable then + local disable = check_disable(plugin_conf) + if type(plugin_conf) == "table" and not disable then core.table.insert(plugins, plugin_obj) core.table.insert(plugins, plugin_conf) end @@ -564,22 +692,30 @@ end function _M.init_worker() - _M.load() + local _, http_plugin_names, stream_plugin_names = get_plugin_names() -- some plugins 
need to be initialized in init* phases - if is_http and local_plugins_hash["prometheus"] then - local prometheus_enabled_in_stream = stream_local_plugins_hash["prometheus"] + if is_http and core.table.array_find(http_plugin_names, "prometheus") then + local prometheus_enabled_in_stream = + core.table.array_find(stream_plugin_names, "prometheus") require("apisix.plugins.prometheus.exporter").http_init(prometheus_enabled_in_stream) - elseif not is_http and stream_local_plugins_hash["prometheus"] then + elseif not is_http and core.table.array_find(stream_plugin_names, "prometheus") then require("apisix.plugins.prometheus.exporter").stream_init() end + -- someone's plugin needs to be initialized after prometheus + -- see https://github.com/apache/apisix/issues/3286 + _M.load() + if local_conf and not local_conf.apisix.enable_admin then init_plugins_syncer() end local plugin_metadatas, err = core.config.new("/plugin_metadata", - {automatic = true} + { + automatic = true, + checker = check_plugin_metadata + } ) if not plugin_metadatas then error("failed to create etcd instance for fetching /plugin_metadatas : " @@ -633,39 +769,57 @@ function _M.conf_version(conf) end -local function check_schema(plugins_conf, schema_type, skip_disabled_plugin) - for name, plugin_conf in pairs(plugins_conf) do - core.log.info("check plugin schema, name: ", name, ", configurations: ", - core.json.delay_encode(plugin_conf, true)) - if type(plugin_conf) ~= "table" then - return false, "invalid plugin conf " .. - core.json.encode(plugin_conf, true) .. - " for plugin [" .. name .. "]" - end +local function check_single_plugin_schema(name, plugin_conf, schema_type, skip_disabled_plugin) + core.log.info("check plugin schema, name: ", name, ", configurations: ", + core.json.delay_encode(plugin_conf, true)) + if type(plugin_conf) ~= "table" then + return false, "invalid plugin conf " .. + core.json.encode(plugin_conf, true) .. + " for plugin [" .. name .. 
"]" + end - local plugin_obj = local_plugins_hash[name] - if not plugin_obj then - if skip_disabled_plugin then - goto CONTINUE - else - return false, "unknown plugin [" .. name .. "]" - end + local plugin_obj = local_plugins_hash[name] + if not plugin_obj then + if skip_disabled_plugin then + return true + else + return false, "unknown plugin [" .. name .. "]" end + end - if plugin_obj.check_schema then - local disable = plugin_conf.disable - plugin_conf.disable = nil + if plugin_obj.check_schema then + local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) + if not ok then + return false, "failed to check the configuration of plugin " + .. name .. " err: " .. err + end - local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) + if plugin_conf._meta and plugin_conf._meta.filter then + ok, err = expr.new(plugin_conf._meta.filter) if not ok then - return false, "failed to check the configuration of plugin " - .. name .. " err: " .. err + return nil, "failed to validate the 'vars' expression: " .. err end - - plugin_conf.disable = disable end + end - ::CONTINUE:: + return true +end + + +check_plugin_metadata = function(item) + return check_single_plugin_schema(item.id, item, + core.schema.TYPE_METADATA, true) +end + + + +local function check_schema(plugins_conf, schema_type, skip_disabled_plugin) + for name, plugin_conf in pairs(plugins_conf) do + local ok, err = check_single_plugin_schema(name, plugin_conf, + schema_type, skip_disabled_plugin) + if not ok then + return false, err + end end return true @@ -693,16 +847,11 @@ local function stream_check_schema(plugins_conf, schema_type, skip_disabled_plug end if plugin_obj.check_schema then - local disable = plugin_conf.disable - plugin_conf.disable = nil - local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) if not ok then return false, "failed to check the configuration of " .. "stream plugin [" .. name .. "]: " .. 
err end - - plugin_conf.disable = disable end ::CONTINUE:: @@ -749,11 +898,20 @@ function _M.run_plugin(phase, plugins, api_ctx) and phase ~= "delayed_body_filter" then for i = 1, #plugins, 2 do - if phase == "rewrite_in_consumer" and plugins[i + 1]._from_consumer - and plugins[i].type ~= "auth"then - phase = "rewrite" + local phase_func + if phase == "rewrite_in_consumer" then + if plugins[i].type == "auth" then + plugins[i + 1]._skip_rewrite_in_consumer = true + end + phase_func = plugins[i]["rewrite"] + else + phase_func = plugins[i][phase] end - local phase_func = plugins[i][phase] + + if phase == "rewrite_in_consumer" and plugins[i + 1]._skip_rewrite_in_consumer then + goto CONTINUE + end + if phase_func then plugin_run = true local conf = plugins[i + 1] @@ -781,6 +939,8 @@ function _M.run_plugin(phase, plugins, api_ctx) end end end + + ::CONTINUE:: end return api_ctx, plugin_run end diff --git a/apisix/plugin_config.lua b/apisix/plugin_config.lua index 903ea6ec1913..cc5a6ff38456 100644 --- a/apisix/plugin_config.lua +++ b/apisix/plugin_config.lua @@ -65,7 +65,9 @@ function _M.merge(route_conf, plugin_config) route_conf.value.plugins = core.table.clone(route_conf.value.plugins) for name, value in pairs(plugin_config.value.plugins) do - route_conf.value.plugins[name] = value + if not route_conf.value.plugins[name] then + route_conf.value.plugins[name] = value + end end route_conf.update_count = route_conf.update_count + 1 diff --git a/apisix/plugins/authz-keycloak.lua b/apisix/plugins/authz-keycloak.lua index 50f718d6b933..336fb69b17ea 100644 --- a/apisix/plugins/authz-keycloak.lua +++ b/apisix/plugins/authz-keycloak.lua @@ -31,8 +31,6 @@ local schema = { token_endpoint = {type = "string", minLength = 1, maxLength = 4096}, resource_registration_endpoint = {type = "string", minLength = 1, maxLength = 4096}, client_id = {type = "string", minLength = 1, maxLength = 100}, - audience = {type = "string", minLength = 1, maxLength = 100, - description = "Deprecated, 
use `client_id` instead."}, client_secret = {type = "string", minLength = 1, maxLength = 100}, grant_type = { type = "string", @@ -73,6 +71,7 @@ local schema = { maxLength = 4096 }, }, + required = {"client_id"}, allOf = { -- Require discovery or token endpoint. { @@ -81,13 +80,6 @@ local schema = { {required = {"token_endpoint"}} } }, - -- Require client_id or audience. - { - anyOf = { - {required = {"client_id"}}, - {required = {"audience"}} - } - }, -- If lazy_load_paths is true, require discovery or resource registration endpoint. { anyOf = { @@ -120,28 +112,10 @@ local _M = { function _M.check_schema(conf) - -- Check for deprecated audience attribute and emit warnings if used. - if conf.audience then - log.warn("Plugin attribute `audience` is deprecated, use `client_id` instead.") - if conf.client_id then - log.warn("Ignoring `audience` attribute in favor of `client_id`.") - end - end return core.schema.check(schema, conf) end --- Return the configured client ID parameter. -local function authz_keycloak_get_client_id(conf) - if conf.client_id then - -- Prefer client_id, if given. - return conf.client_id - end - - return conf.audience -end - - -- Some auxiliary functions below heavily inspired by the excellent -- lua-resty-openidc module; see https://github.com/zmartzone/lua-resty-openidc @@ -339,7 +313,7 @@ end -- Ensure a valid service account access token is available for the configured client. 
local function authz_keycloak_ensure_sa_access_token(conf) - local client_id = authz_keycloak_get_client_id(conf) + local client_id = conf.client_id local ttl = conf.cache_ttl_seconds local token_endpoint = authz_keycloak_get_token_endpoint(conf) @@ -648,7 +622,7 @@ local function evaluate_permissions(conf, ctx, token) method = "POST", body = ngx.encode_args({ grant_type = conf.grant_type, - audience = authz_keycloak_get_client_id(conf), + audience = conf.client_id, response_mode = "decision", permission = permission }), @@ -732,7 +706,7 @@ local function generate_token_using_password_grant(conf,ctx) return 422, {message = err} end - local client_id = authz_keycloak_get_client_id(conf) + local client_id = conf.client_id local token_endpoint = authz_keycloak_get_token_endpoint(conf) diff --git a/apisix/plugins/clickhouse-logger.lua b/apisix/plugins/clickhouse-logger.lua index f7b734645334..026f0cfa93da 100644 --- a/apisix/plugins/clickhouse-logger.lua +++ b/apisix/plugins/clickhouse-logger.lua @@ -21,6 +21,7 @@ local core = require("apisix.core") local http = require("resty.http") local url = require("net.url") local plugin = require("apisix.plugin") +local math_random = math.random local ngx = ngx local tostring = tostring @@ -31,7 +32,9 @@ local batch_processor_manager = bp_manager_mod.new(plugin_name) local schema = { type = "object", properties = { + -- deprecated, use "endpoint_addrs" instead endpoint_addr = core.schema.uri_def, + endpoint_addrs = {items = core.schema.uri_def, type = "array", minItems = 1}, user = {type = "string", default = ""}, password = {type = "string", default = ""}, database = {type = "string", default = ""}, @@ -40,7 +43,10 @@ local schema = { name = {type = "string", default = "clickhouse logger"}, ssl_verify = {type = "boolean", default = true}, }, - required = {"endpoint_addr", "user", "password", "database", "logtable"} + oneOf = { + {required = {"endpoint_addr", "user", "password", "database", "logtable"}}, + {required = 
{"endpoint_addrs", "user", "password", "database", "logtable"}} + }, } @@ -72,11 +78,17 @@ end local function send_http_data(conf, log_message) local err_msg local res = true - local url_decoded = url.parse(conf.endpoint_addr) + local selected_endpoint_addr + if conf.endpoint_addr then + selected_endpoint_addr = conf.endpoint_addr + else + selected_endpoint_addr = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] + end + local url_decoded = url.parse(selected_endpoint_addr) local host = url_decoded.host local port = url_decoded.port - core.log.info("sending a batch logs to ", conf.endpoint_addr) + core.log.info("sending a batch logs to ", selected_endpoint_addr) if not port then if url_decoded.scheme == "https" then diff --git a/apisix/plugins/elasticsearch-logger.lua b/apisix/plugins/elasticsearch-logger.lua new file mode 100644 index 000000000000..105cbe4d98bb --- /dev/null +++ b/apisix/plugins/elasticsearch-logger.lua @@ -0,0 +1,176 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local http = require("resty.http") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local plugin = require("apisix.plugin") + +local ngx = ngx +local str_format = core.string.format + +local plugin_name = "elasticsearch-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) + + +local schema = { + type = "object", + properties = { + endpoint_addr = { + type = "string", + pattern = "[^/]$", + }, + field = { + type = "object", + properties = { + index = { type = "string"}, + type = { type = "string"} + }, + required = {"index"} + }, + auth = { + type = "object", + properties = { + username = { + type = "string", + minLength = 1 + }, + password = { + type = "string", + minLength = 1 + }, + }, + required = {"username", "password"}, + }, + timeout = { + type = "integer", + minimum = 1, + default = 10 + }, + ssl_verify = { + type = "boolean", + default = true + } + }, + required = { "endpoint_addr", "field" }, +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = log_util.metadata_schema_log_format, + }, +} + + +local _M = { + version = 0.1, + priority = 413, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + + +local function get_logger_entry(conf, ctx) + local entry + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + if metadata and metadata.value.log_format + and core.table.nkeys(metadata.value.log_format) > 0 + then + entry = log_util.get_custom_format_log(ctx, metadata.value.log_format) + core.log.info("custom log format entry: ", core.json.delay_encode(entry)) + else 
+ entry = log_util.get_full_log(ngx, conf) + core.log.info("full log entry: ", core.json.delay_encode(entry)) + end + + return core.json.encode({ + create = { + _index = conf.field.index, + _type = conf.field.type + } + }) .. "\n" .. + core.json.encode(entry) .. "\n" +end + + +local function send_to_elasticsearch(conf, entries) + local httpc, err = http.new() + if not httpc then + return false, str_format("create http error: %s", err) + end + + local uri = conf.endpoint_addr .. "/_bulk" + local body = core.table.concat(entries, "") + local headers = {["Content-Type"] = "application/x-ndjson"} + if conf.auth then + local authorization = "Basic " .. ngx.encode_base64( + conf.auth.username .. ":" .. conf.auth.password + ) + headers["Authorization"] = authorization + end + + core.log.info("uri: ", uri, ", body: ", body) + + httpc:set_timeout(conf.timeout * 1000) + local resp, err = httpc:request_uri(uri, { + ssl_verify = conf.ssl_verify, + method = "POST", + headers = headers, + body = body + }) + if not resp then + return false, err + end + + if resp.status ~= 200 then + return false, str_format("elasticsearch server returned status: %d, body: %s", + resp.status, resp.body or "") + end + + return true +end + + +function _M.log(conf, ctx) + local entry = get_logger_entry(conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + return send_to_elasticsearch(conf, entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/apisix/plugins/error-log-logger.lua b/apisix/plugins/error-log-logger.lua index f4f28b472153..5aa7a7418cfd 100644 --- a/apisix/plugins/error-log-logger.lua +++ b/apisix/plugins/error-log-logger.lua @@ -66,12 +66,6 @@ local metadata_schema = { }, required = {"endpoint_addr", "user", "password", "database", "logtable"} }, - host = {schema_def.host_def, description = "Deprecated, use `tcp.host` instead."}, - port = {type 
= "integer", minimum = 0, description = "Deprecated, use `tcp.port` instead."}, - tls = {type = "boolean", default = false, - description = "Deprecated, use `tcp.tls` instead."}, - tls_server_name = {type = "string", - description = "Deprecated, use `tcp.tls_server_name` instead."}, name = {type = "string", default = plugin_name}, level = {type = "string", default = "WARN", enum = {"STDERR", "EMERG", "ALERT", "CRIT", "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"}}, diff --git a/apisix/plugins/grpc-transcode.lua b/apisix/plugins/grpc-transcode.lua index 7da62a805fdd..2405d33ec764 100644 --- a/apisix/plugins/grpc-transcode.lua +++ b/apisix/plugins/grpc-transcode.lua @@ -77,15 +77,24 @@ local schema = { required = { "proto_id", "service", "method" }, } +-- Based on https://cloud.google.com/apis/design/errors#handling_errors local status_rel = { - ["3"] = 400, - ["4"] = 504, - ["5"] = 404, - ["7"] = 403, - ["11"] = 416, - ["12"] = 501, - ["13"] = 500, - ["14"] = 503, + ["1"] = 499, -- CANCELLED + ["2"] = 500, -- UNKNOWN + ["3"] = 400, -- INVALID_ARGUMENT + ["4"] = 504, -- DEADLINE_EXCEEDED + ["5"] = 404, -- NOT_FOUND + ["6"] = 409, -- ALREADY_EXISTS + ["7"] = 403, -- PERMISSION_DENIED + ["8"] = 429, -- RESOURCE_EXHAUSTED + ["9"] = 400, -- FAILED_PRECONDITION + ["10"] = 409, -- ABORTED + ["11"] = 400, -- OUT_OF_RANGE + ["12"] = 501, -- UNIMPLEMENTED + ["13"] = 500, -- INTERNAL + ["14"] = 503, -- UNAVAILABLE + ["15"] = 500, -- DATA_LOSS + ["16"] = 401, -- UNAUTHENTICATED } local _M = { diff --git a/apisix/plugins/grpc-transcode/proto.lua b/apisix/plugins/grpc-transcode/proto.lua index c30c17e71855..c2a3cb523394 100644 --- a/apisix/plugins/grpc-transcode/proto.lua +++ b/apisix/plugins/grpc-transcode/proto.lua @@ -159,7 +159,7 @@ end function _M.init() local err - protos, err = core.config.new("/proto", { + protos, err = core.config.new("/protos", { automatic = true, item_schema = core.schema.proto }) diff --git a/apisix/plugins/grpc-transcode/util.lua 
b/apisix/plugins/grpc-transcode/util.lua index de54cdb87984..dc4526195639 100644 --- a/apisix/plugins/grpc-transcode/util.lua +++ b/apisix/plugins/grpc-transcode/util.lua @@ -147,6 +147,22 @@ function _M.map_message(field, default_values, request_table) if ty ~= "enum" and field_type:sub(1, 1) == "." then if request_table[name] == nil then sub = default_values and default_values[name] + elseif core.table.isarray(request_table[name]) then + local sub_array = core.table.new(#request_table[name], 0) + for i, value in ipairs(request_table[name]) do + local sub_array_obj + if type(value) == "table" then + sub_array_obj, err = _M.map_message(field_type, + default_values and default_values[name], value) + if err then + return nil, err + end + else + sub_array_obj = value + end + sub_array[i] = sub_array_obj + end + sub = sub_array else sub, err = _M.map_message(field_type, default_values and default_values[name], request_table[name]) diff --git a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua index 3d3ebdfb4e2d..93cd8c9bef3b 100644 --- a/apisix/plugins/http-logger.lua +++ b/apisix/plugins/http-logger.lua @@ -33,7 +33,7 @@ local schema = { type = "object", properties = { uri = core.schema.uri_def, - auth_header = {type = "string", default = ""}, + auth_header = {type = "string"}, timeout = {type = "integer", minimum = 1, default = 3}, include_req_body = {type = "boolean", default = false}, include_resp_body = {type = "boolean", default = false}, diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua index 82c12c95b2c5..36006975f5d3 100644 --- a/apisix/plugins/jwt-auth.lua +++ b/apisix/plugins/jwt-auth.lua @@ -60,7 +60,7 @@ local consumer_schema = { secret = {type = "string"}, algorithm = { type = "string", - enum = {"HS256", "HS512", "RS256"}, + enum = {"HS256", "HS512", "RS256", "ES256"}, default = "HS256" }, exp = {type = "integer", minimum = 1, default = 86400}, @@ -71,6 +71,11 @@ local consumer_schema = { vault = { type = "object", 
properties = {} + }, + lifetime_grace_period = { + type = "integer", + minimum = 0, + default = 0 } }, dependencies = { @@ -89,7 +94,7 @@ local consumer_schema = { public_key = {type = "string"}, private_key= {type = "string"}, algorithm = { - enum = {"RS256"}, + enum = {"RS256", "ES256"}, }, }, required = {"public_key", "private_key"}, @@ -101,7 +106,7 @@ local consumer_schema = { properties = {} }, algorithm = { - enum = {"RS256"}, + enum = {"RS256", "ES256"}, }, }, required = {"vault"}, @@ -161,7 +166,7 @@ function _M.check_schema(conf, schema_type) return true end - if conf.algorithm ~= "RS256" and not conf.secret then + if conf.algorithm ~= "RS256" and conf.algorithm ~= "ES256" and not conf.secret then conf.secret = ngx_encode_base64(resty_random.bytes(32, true)) elseif conf.base64_secret then if ngx_decode_base64(conf.secret) == nil then @@ -169,7 +174,7 @@ function _M.check_schema(conf, schema_type) end end - if conf.algorithm == "RS256" then + if conf.algorithm == "RS256" or conf.algorithm == "ES256" then -- Possible options are a) both are in vault, b) both in schema -- c) one in schema, another in vault. 
if not conf.public_key then @@ -235,7 +240,7 @@ local function get_secret(conf, consumer_name) end -local function get_rsa_keypair(conf, consumer_name) +local function get_rsa_or_ecdsa_keypair(conf, consumer_name) local public_key = conf.public_key local private_key = conf.private_key -- if keys are present in conf, no need to query vault (fallback) @@ -304,8 +309,10 @@ local function sign_jwt_with_HS(key, consumer, payload) end -local function sign_jwt_with_RS256(key, consumer, payload) - local public_key, private_key, err = get_rsa_keypair(consumer.auth_conf, consumer.username) +local function sign_jwt_with_RS256_ES256(key, consumer, payload) + local public_key, private_key, err = get_rsa_or_ecdsa_keypair( + consumer.auth_conf, consumer.username + ) if not public_key then core.log.error("failed to sign jwt, err: ", err) core.response.exit(503, "failed to sign jwt") @@ -340,12 +347,12 @@ local function algorithm_handler(consumer, method_only) end return get_secret(consumer.auth_conf, consumer.username) - elseif consumer.auth_conf.algorithm == "RS256" then + elseif consumer.auth_conf.algorithm == "RS256" or consumer.auth_conf.algorithm == "ES256" then if method_only then - return sign_jwt_with_RS256 + return sign_jwt_with_RS256_ES256 end - local public_key, _, err = get_rsa_keypair(consumer.auth_conf, consumer.username) + local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf, consumer.username) return public_key, err end end @@ -389,7 +396,10 @@ function _M.rewrite(conf, ctx) core.log.error("failed to retrieve secrets, err: ", err) return 503, {message = "failed to verify jwt"} end - jwt_obj = jwt:verify_jwt_obj(auth_secret, jwt_obj) + local claim_specs = jwt:get_default_validation_options(jwt_obj) + claim_specs.lifetime_grace_period = consumer.auth_conf.lifetime_grace_period + + jwt_obj = jwt:verify_jwt_obj(auth_secret, jwt_obj, claim_specs) core.log.info("jwt object: ", core.json.delay_encode(jwt_obj)) if not jwt_obj.verified then diff --git 
a/apisix/plugins/kafka-logger.lua b/apisix/plugins/kafka-logger.lua index 2947d145e468..cb43ae3db24b 100644 --- a/apisix/plugins/kafka-logger.lua +++ b/apisix/plugins/kafka-logger.lua @@ -83,6 +83,11 @@ local schema = { -- in lua-resty-kafka, cluster_name is defined as number -- see https://github.com/doujiang24/lua-resty-kafka#new-1 cluster_name = {type = "integer", minimum = 1, default = 1}, + -- config for lua-resty-kafka, default value is same as lua-resty-kafka + producer_batch_num = {type = "integer", minimum = 1, default = 200}, + producer_batch_size = {type = "integer", minimum = 0, default = 1048576}, + producer_max_buffering = {type = "integer", minimum = 1, default = 50000}, + producer_time_linger = {type = "integer", minimum = 1, default = 1} }, required = {"broker_list", "kafka_topic"} } @@ -208,6 +213,10 @@ function _M.log(conf, ctx) broker_config["request_timeout"] = conf.timeout * 1000 broker_config["producer_type"] = conf.producer_type broker_config["required_acks"] = conf.required_acks + broker_config["batch_num"] = conf.producer_batch_num + broker_config["batch_size"] = conf.producer_batch_size + broker_config["max_buffering"] = conf.producer_max_buffering + broker_config["flush_time"] = conf.producer_time_linger * 1000 local prod, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_producer, broker_list, broker_config, conf.cluster_name) diff --git a/apisix/plugins/ldap-auth.lua b/apisix/plugins/ldap-auth.lua index 3fce91141119..d155696b6337 100644 --- a/apisix/plugins/ldap-auth.lua +++ b/apisix/plugins/ldap-auth.lua @@ -19,7 +19,7 @@ local ngx = ngx local ngx_re = require("ngx.re") local ipairs = ipairs local consumer_mod = require("apisix.consumer") -local lualdap = require("lualdap") +local ldap = require("resty.ldap") local lrucache = core.lrucache.new({ ttl = 300, count = 512 @@ -31,8 +31,9 @@ local schema = { properties = { base_dn = { type = "string" }, ldap_uri = { type = "string" }, - use_tls = { type = "boolean" }, - uid = { type 
= "string" } + use_tls = { type = "boolean", default = false }, + tls_verify = { type = "boolean", default = false }, + uid = { type = "string", default = "cn" } }, required = {"base_dn","ldap_uri"}, } @@ -136,11 +137,23 @@ function _M.rewrite(conf, ctx) end -- 2. try authenticate the user against the ldap server - local uid = conf.uid or "cn" - - local userdn = uid .. "=" .. user.username .. "," .. conf.base_dn - local ld = lualdap.open_simple (conf.ldap_uri, userdn, user.password, conf.use_tls) - if not ld then + local ldap_host, ldap_port = core.utils.parse_addr(conf.ldap_uri) + + local userdn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn + local ldapconf = { + timeout = 10000, + start_tls = false, + ldap_host = ldap_host, + ldap_port = ldap_port or 389, + ldaps = conf.use_tls, + tls_verify = conf.tls_verify, + base_dn = conf.base_dn, + attribute = conf.uid, + keepalive = 60000, + } + local res, err = ldap.ldap_authenticate(user.username, user.password, ldapconf) + if not res then + core.log.warn("ldap-auth failed: ", err) return 401, { message = "Invalid user authorization" } end diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index 746e474b93d0..0eafd64235b3 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -14,267 +14,24 @@ -- See the License for the specific language governing permissions and -- limitations under the License. 
-- -local limit_local_new = require("resty.limit.count").new -local core = require("apisix.core") -local apisix_plugin = require("apisix.plugin") -local tab_insert = table.insert -local ipairs = ipairs -local pairs = pairs - +local limit_count = require("apisix.plugins.limit-count.init") local plugin_name = "limit-count" -local limit_redis_cluster_new -local limit_redis_new -do - local redis_src = "apisix.plugins.limit-count.limit-count-redis" - limit_redis_new = require(redis_src).new - - local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" - limit_redis_cluster_new = require(cluster_src).new -end -local lrucache = core.lrucache.new({ - type = 'plugin', serial_creating = true, -}) -local group_conf_lru = core.lrucache.new({ - type = 'plugin', -}) - - -local policy_to_additional_properties = { - redis = { - properties = { - redis_host = { - type = "string", minLength = 2 - }, - redis_port = { - type = "integer", minimum = 1, default = 6379, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_database = { - type = "integer", minimum = 0, default = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - }, - required = {"redis_host"}, - }, - ["redis-cluster"] = { - properties = { - redis_cluster_nodes = { - type = "array", - minItems = 2, - items = { - type = "string", minLength = 2, maxLength = 100 - }, - }, - redis_password = { - type = "string", minLength = 0, - }, - redis_timeout = { - type = "integer", minimum = 1, default = 1000, - }, - redis_cluster_name = { - type = "string", - }, - }, - required = {"redis_cluster_nodes", "redis_cluster_name"}, - }, -} -local schema = { - type = "object", - properties = { - count = {type = "integer", exclusiveMinimum = 0}, - time_window = {type = "integer", exclusiveMinimum = 0}, - group = {type = "string"}, - key = {type = "string", default = "remote_addr"}, - key_type = {type = "string", - enum = {"var", "var_combination", "constant"}, - default = "var", 
- }, - rejected_code = { - type = "integer", minimum = 200, maximum = 599, default = 503 - }, - rejected_msg = { - type = "string", minLength = 1 - }, - policy = { - type = "string", - enum = {"local", "redis", "redis-cluster"}, - default = "local", - }, - allow_degradation = {type = "boolean", default = false}, - show_limit_quota_header = {type = "boolean", default = true} - }, - required = {"count", "time_window"}, - ["if"] = { - properties = { - policy = { - enum = {"redis"}, - }, - }, - }, - ["then"] = policy_to_additional_properties.redis, - ["else"] = { - ["if"] = { - properties = { - policy = { - enum = {"redis-cluster"}, - }, - }, - }, - ["then"] = policy_to_additional_properties["redis-cluster"], - } -} - - local _M = { version = 0.4, priority = 1002, name = plugin_name, - schema = schema, + schema = limit_count.schema, } -local function group_conf(conf) - return conf -end - - function _M.check_schema(conf) - local ok, err = core.schema.check(schema, conf) - if not ok then - return false, err - end - - if conf.group then - local fields = {} - for k in pairs(schema.properties) do - tab_insert(fields, k) - end - local extra = policy_to_additional_properties[conf.policy] - if extra then - for k in pairs(extra.properties) do - tab_insert(fields, k) - end - end - - local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) - - for _, field in ipairs(fields) do - if not core.table.deep_eq(prev_conf[field], conf[field]) then - core.log.error("previous limit-conn group ", prev_conf.group, - " conf: ", core.json.encode(prev_conf)) - core.log.error("current limit-conn group ", conf.group, - " conf: ", core.json.encode(conf)) - return false, "group conf mismatched" - end - end - end - - return true -end - - -local function create_limit_obj(conf) - core.log.info("create new limit-count plugin instance") - - if not conf.policy or conf.policy == "local" then - return limit_local_new("plugin-" .. 
plugin_name, conf.count, - conf.time_window) - end - - if conf.policy == "redis" then - return limit_redis_new("plugin-" .. plugin_name, - conf.count, conf.time_window, conf) - end - - if conf.policy == "redis-cluster" then - return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, - conf.time_window, conf) - end - - return nil + return limit_count.check_schema(conf) end function _M.access(conf, ctx) - core.log.info("ver: ", ctx.conf_version) - - local lim, err - if not conf.group then - lim, err = core.lrucache.plugin_ctx(lrucache, ctx, conf.policy, create_limit_obj, conf) - else - lim, err = lrucache(conf.group, "", create_limit_obj, conf) - end - - if not lim then - core.log.error("failed to fetch limit.count object: ", err) - if conf.allow_degradation then - return - end - return 500 - end - - local conf_key = conf.key - local key - if conf.key_type == "var_combination" then - local err, n_resolved - key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) - if err then - core.log.error("could not resolve vars in ", conf_key, " error: ", err) - end - - if n_resolved == 0 then - key = nil - end - elseif conf.key_type == "constant" then - key = conf_key - else - key = ctx.var[conf_key] - end - - if key == nil then - core.log.info("The value of the configured key is empty, use client IP instead") - -- When the value of key is empty, use client IP instead - key = ctx.var["remote_addr"] - end - - -- here we add a separator ':' to mark the boundary of the prefix and the key itself - if not conf.group then - -- Here we use plugin-level conf version to prevent the counter from being resetting - -- because of the change elsewhere. - -- A route which reuses a previous route's ID will inherits its counter. - key = ctx.conf_type .. apisix_plugin.conf_version(conf) .. ':' .. key - else - key = conf.group .. ':' .. 
key - end - - core.log.info("limit key: ", key) - - local delay, remaining = lim:incoming(key, true) - if not delay then - local err = remaining - if err == "rejected" then - if conf.rejected_msg then - return conf.rejected_code, { error_msg = conf.rejected_msg } - end - return conf.rejected_code - end - - core.log.error("failed to limit count: ", err) - if conf.allow_degradation then - return - end - return 500, {error_msg = "failed to limit count"} - end - - if conf.show_limit_quota_header then - core.response.set_header("X-RateLimit-Limit", conf.count, - "X-RateLimit-Remaining", remaining) - end + return limit_count.rate_limit(conf, ctx) end diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua new file mode 100644 index 000000000000..c9051d2e14ef --- /dev/null +++ b/apisix/plugins/limit-count/init.lua @@ -0,0 +1,310 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local limit_local_new = require("resty.limit.count").new +local core = require("apisix.core") +local apisix_plugin = require("apisix.plugin") +local tab_insert = table.insert +local ipairs = ipairs +local pairs = pairs + + +local plugin_name = "limit-count" +local limit_redis_cluster_new +local limit_redis_new +do + local redis_src = "apisix.plugins.limit-count.limit-count-redis" + limit_redis_new = require(redis_src).new + + local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster" + limit_redis_cluster_new = require(cluster_src).new +end +local lrucache = core.lrucache.new({ + type = 'plugin', serial_creating = true, +}) +local group_conf_lru = core.lrucache.new({ + type = 'plugin', +}) + + +local policy_to_additional_properties = { + redis = { + properties = { + redis_host = { + type = "string", minLength = 2 + }, + redis_port = { + type = "integer", minimum = 1, default = 6379, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_database = { + type = "integer", minimum = 0, default = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + }, + required = {"redis_host"}, + }, + ["redis-cluster"] = { + properties = { + redis_cluster_nodes = { + type = "array", + minItems = 2, + items = { + type = "string", minLength = 2, maxLength = 100 + }, + }, + redis_password = { + type = "string", minLength = 0, + }, + redis_timeout = { + type = "integer", minimum = 1, default = 1000, + }, + redis_cluster_name = { + type = "string", + }, + }, + required = {"redis_cluster_nodes", "redis_cluster_name"}, + }, +} +local schema = { + type = "object", + properties = { + count = {type = "integer", exclusiveMinimum = 0}, + time_window = {type = "integer", exclusiveMinimum = 0}, + group = {type = "string"}, + key = {type = "string", default = "remote_addr"}, + key_type = {type = "string", + enum = {"var", "var_combination", "constant"}, + default = "var", + }, + rejected_code = { + type = "integer", minimum = 200, 
maximum = 599, default = 503 + }, + rejected_msg = { + type = "string", minLength = 1 + }, + policy = { + type = "string", + enum = {"local", "redis", "redis-cluster"}, + default = "local", + }, + allow_degradation = {type = "boolean", default = false}, + show_limit_quota_header = {type = "boolean", default = true} + }, + required = {"count", "time_window"}, + ["if"] = { + properties = { + policy = { + enum = {"redis"}, + }, + }, + }, + ["then"] = policy_to_additional_properties.redis, + ["else"] = { + ["if"] = { + properties = { + policy = { + enum = {"redis-cluster"}, + }, + }, + }, + ["then"] = policy_to_additional_properties["redis-cluster"], + } +} + +local schema_copy = core.table.deepcopy(schema) + +local _M = { + schema = schema +} + + +local function group_conf(conf) + return conf +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + if conf.group then + -- means that call by some plugin not support + if conf._vid then + return false, "group is not supported" + end + + local fields = {} + -- When the goup field is configured, + -- we will use schema_copy to get the whitelist of properties, + -- so that we can avoid getting injected properties. 
+ for k in pairs(schema_copy.properties) do + tab_insert(fields, k) + end + local extra = policy_to_additional_properties[conf.policy] + if extra then + for k in pairs(extra.properties) do + tab_insert(fields, k) + end + end + + local prev_conf = group_conf_lru(conf.group, "", group_conf, conf) + + for _, field in ipairs(fields) do + if not core.table.deep_eq(prev_conf[field], conf[field]) then + core.log.error("previous limit-conn group ", prev_conf.group, + " conf: ", core.json.encode(prev_conf)) + core.log.error("current limit-conn group ", conf.group, + " conf: ", core.json.encode(conf)) + return false, "group conf mismatched" + end + end + end + + return true +end + + +local function create_limit_obj(conf) + core.log.info("create new limit-count plugin instance") + + if not conf.policy or conf.policy == "local" then + return limit_local_new("plugin-" .. plugin_name, conf.count, + conf.time_window) + end + + if conf.policy == "redis" then + return limit_redis_new("plugin-" .. plugin_name, + conf.count, conf.time_window, conf) + end + + if conf.policy == "redis-cluster" then + return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count, + conf.time_window, conf) + end + + return nil +end + + +local function gen_limit_key(conf, ctx, key) + if conf.group then + return conf.group .. ':' .. key + end + + -- here we add a separator ':' to mark the boundary of the prefix and the key itself + -- Here we use plugin-level conf version to prevent the counter from being resetting + -- because of the change elsewhere. + -- A route which reuses a previous route's ID will inherits its counter. + local new_key = ctx.conf_type .. ctx.conf_id .. ':' .. apisix_plugin.conf_version(conf) + .. ':' .. key + if conf._vid then + -- conf has _vid means it's from workflow plugin, add _vid to the key + -- so that the counter is unique per action. + return new_key .. ':' .. 
conf._vid + end + + return new_key +end + + +local function gen_limit_obj(conf, ctx) + if conf.group then + return lrucache(conf.group, "", create_limit_obj, conf) + end + + local extra_key + if conf._vid then + extra_key = conf.policy .. '#' .. conf._vid + else + extra_key = conf.policy + end + + return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf) +end + + +function _M.rate_limit(conf, ctx) + core.log.info("ver: ", ctx.conf_version) + + local lim, err = gen_limit_obj(conf, ctx) + + if not lim then + core.log.error("failed to fetch limit.count object: ", err) + if conf.allow_degradation then + return + end + return 500 + end + + local conf_key = conf.key + local key + if conf.key_type == "var_combination" then + local err, n_resolved + key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var) + if err then + core.log.error("could not resolve vars in ", conf_key, " error: ", err) + end + + if n_resolved == 0 then + key = nil + end + elseif conf.key_type == "constant" then + key = conf_key + else + key = ctx.var[conf_key] + end + + if key == nil then + core.log.info("The value of the configured key is empty, use client IP instead") + -- When the value of key is empty, use client IP instead + key = ctx.var["remote_addr"] + end + + key = gen_limit_key(conf, ctx, key) + core.log.info("limit key: ", key) + + local delay, remaining = lim:incoming(key, true) + if not delay then + local err = remaining + if err == "rejected" then + if conf.rejected_msg then + return conf.rejected_code, { error_msg = conf.rejected_msg } + end + return conf.rejected_code + end + + core.log.error("failed to limit count: ", err) + if conf.allow_degradation then + return + end + return 500, {error_msg = "failed to limit count"} + end + + if conf.show_limit_quota_header then + core.response.set_header("X-RateLimit-Limit", conf.count, + "X-RateLimit-Remaining", remaining) + end +end + + +return _M diff --git a/apisix/plugins/log-rotate.lua 
b/apisix/plugins/log-rotate.lua index 79459371702e..60b1e3ddb547 100644 --- a/apisix/plugins/log-rotate.lua +++ b/apisix/plugins/log-rotate.lua @@ -21,6 +21,7 @@ local plugin = require("apisix.plugin") local process = require("ngx.process") local signal = require("resty.signal") local shell = require("resty.shell") +local ipairs = ipairs local ngx = ngx local ngx_time = ngx.time local ngx_update_time = ngx.update_time @@ -34,15 +35,14 @@ local str_sub = string.sub local str_find = string.find local str_format = string.format local str_reverse = string.reverse -local tab_insert = table.insert -local tab_sort = table.sort - +local ngx_sleep = require("apisix.core.utils").sleep local local_conf local plugin_name = "log-rotate" local INTERVAL = 60 * 60 -- rotate interval (unit: second) local MAX_KEPT = 24 * 7 -- max number of log files will be kept +local MAX_SIZE = -1 -- max size of file will be rotated local COMPRESSION_FILE_SUFFIX = ".tar.gz" -- compression file suffix local rotate_time local default_logs @@ -123,34 +123,22 @@ local function tab_sort_comp(a, b) end -local function scan_log_folder() - local t = { - access = {}, - error = {}, - } - - local log_dir, access_name = get_log_path_info("access.log") - local _, error_name = get_log_path_info("error.log") +local function scan_log_folder(log_file_name) + local t = {} - if enable_compression then - access_name = access_name .. COMPRESSION_FILE_SUFFIX - error_name = error_name .. 
COMPRESSION_FILE_SUFFIX - end + local log_dir, _ = get_log_path_info(log_file_name) for file in lfs.dir(log_dir) do local n = get_last_index(file, "__") if n ~= nil then local log_type = file:sub(n + 2) - if log_type == access_name then - tab_insert(t.access, file) - elseif log_type == error_name then - tab_insert(t.error, file) + if log_type == log_file_name then + core.table.insert(t, file) end end end - tab_sort(t.access, tab_sort_comp) - tab_sort(t.error, tab_sort_comp) + core.table.sort(t, tab_sort_comp) return t, log_dir end @@ -219,18 +207,79 @@ local function init_default_logs(logs_info, log_type) end +local function file_size(file) + local attr = lfs.attributes(file) + if attr then + return attr.size + end + return 0 +end + + +local function rotate_file(files, now_time, max_kept) + if core.table.isempty(files) then + return + end + + local new_files = core.table.new(2, 0) + -- rename the log files + for _, file in ipairs(files) do + local now_date = os_date("%Y-%m-%d_%H-%M-%S", now_time) + local new_file = rename_file(default_logs[file], now_date) + if not new_file then + return + end + + core.table.insert(new_files, new_file) + end + + -- send signal to reopen log files + local pid = process.get_master_pid() + core.log.warn("send USR1 signal to master process [", pid, "] for reopening log file") + local ok, err = signal.kill(pid, signal.signum("USR1")) + if not ok then + core.log.error("failed to send USR1 signal for reopening log file: ", err) + end + + if enable_compression then + -- Waiting for nginx reopen files + -- to avoid losing logs during compression + ngx_sleep(0.5) + + for _, new_file in ipairs(new_files) do + compression_file(new_file) + end + end + + for _, file in ipairs(files) do + -- clean the oldest file + local log_list, log_dir = scan_log_folder(file) + for i = max_kept + 1, #log_list do + local path = log_dir .. 
log_list[i] + local ok, err = os_remove(path) + if err then + core.log.error("remove old log file: ", path, " err: ", err, " res:", ok) + end + end + end +end + + local function rotate() local interval = INTERVAL local max_kept = MAX_KEPT + local max_size = MAX_SIZE local attr = plugin.plugin_attr(plugin_name) if attr then interval = attr.interval or interval max_kept = attr.max_kept or max_kept + max_size = attr.max_size or max_size enable_compression = attr.enable_compression or enable_compression end core.log.info("rotate interval:", interval) core.log.info("rotate max keep:", max_kept) + core.log.info("rotate max size:", max_size) if not default_logs then -- first init default log filepath and filename @@ -248,53 +297,28 @@ local function rotate() return end - if now_time < rotate_time then - -- did not reach the rotate time - core.log.info("rotate time: ", rotate_time, " now time: ", now_time) - return - end + if now_time >= rotate_time then + local files = {DEFAULT_ACCESS_LOG_FILENAME, DEFAULT_ERROR_LOG_FILENAME} + rotate_file(files, now_time, max_kept) - local now_date = os_date("%Y-%m-%d_%H-%M-%S", now_time) - local access_new_file = rename_file(default_logs[DEFAULT_ACCESS_LOG_FILENAME], now_date) - local error_new_file = rename_file(default_logs[DEFAULT_ERROR_LOG_FILENAME], now_date) - if not access_new_file and not error_new_file then -- reset rotate time rotate_time = rotate_time + interval - return - end - core.log.warn("send USR1 signal to master process [", - process.get_master_pid(), "] for reopening log file") - local ok, err = signal.kill(process.get_master_pid(), signal.signum("USR1")) - if not ok then - core.log.error("failed to send USR1 signal for reopening log file: ", err) - end + elseif max_size > 0 then + local access_log_file_size = file_size(default_logs[DEFAULT_ACCESS_LOG_FILENAME].file) + local error_log_file_size = file_size(default_logs[DEFAULT_ERROR_LOG_FILENAME].file) + local files = core.table.new(2, 0) - if enable_compression then 
- compression_file(access_new_file) - compression_file(error_new_file) - end - - -- clean the oldest file - local log_list, log_dir = scan_log_folder() - for i = max_kept + 1, #log_list.error do - local path = log_dir .. log_list.error[i] - ok, err = os_remove(path) - if err then - core.log.error("remove old error file: ", path, " err: ", err, " res:", ok) + if access_log_file_size >= max_size then + core.table.insert(files, DEFAULT_ACCESS_LOG_FILENAME) end - end - for i = max_kept + 1, #log_list.access do - local path = log_dir .. log_list.access[i] - ok, err = os_remove(path) - if err then - core.log.error("remove old error file: ", path, " err: ", err, " res:", ok) + if error_log_file_size >= max_size then + core.table.insert(files, DEFAULT_ERROR_LOG_FILENAME) end - end - -- reset rotate time - rotate_time = rotate_time + interval + rotate_file(files, now_time, max_kept) + end end diff --git a/apisix/plugins/openfunction.lua b/apisix/plugins/openfunction.lua new file mode 100644 index 000000000000..935d6ebbc540 --- /dev/null +++ b/apisix/plugins/openfunction.lua @@ -0,0 +1,35 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local ngx_encode_base64 = ngx.encode_base64 +local plugin_name, plugin_version, priority = "openfunction", 0.1, -1902 + +local openfunction_authz_schema = { + service_token = {type = "string"} +} + +local function request_processor(conf, ctx, params) + local headers = params.headers or {} + -- setting authorization headers if authorization.service_token exists + if conf.authorization and conf.authorization.service_token then + headers["authorization"] = "Basic " .. ngx_encode_base64(conf.authorization.service_token) + end + + params.headers = headers +end + +return require("apisix.plugins.serverless.generic-upstream")(plugin_name, + plugin_version, priority, request_processor, openfunction_authz_schema) diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua index 4a6dbda1ccec..b472feca0159 100644 --- a/apisix/plugins/openid-connect.lua +++ b/apisix/plugins/openid-connect.lua @@ -73,6 +73,11 @@ local schema = { }, public_key = {type = "string"}, token_signing_alg_values_expected = {type = "string"}, + use_pkce = { + description = "when set to true the PKEC(Proof Key for Code Exchange) will be used.", + type = "boolean", + default = false + }, set_access_token_header = { description = "Whether the access token should be added as a header to the request " .. 
"for downstream", diff --git a/apisix/plugins/opentelemetry.lua b/apisix/plugins/opentelemetry.lua index ea05b0a8025b..c0e3a74e4cc8 100644 --- a/apisix/plugins/opentelemetry.lua +++ b/apisix/plugins/opentelemetry.lua @@ -53,6 +53,7 @@ local lrucache = core.lrucache.new({ type = 'plugin', count = 128, ttl = 24 * 60 * 60, }) +local asterisk = string.byte("*", 1) local attr_schema = { type = "object", @@ -169,6 +170,13 @@ local schema = { type = "string", minLength = 1, } + }, + additional_header_prefix_attributes = { + type = "array", + items = { + type = "string", + minLength = 1, + } } } } @@ -273,6 +281,27 @@ local function create_tracer_obj(conf) end +local function inject_attributes(attributes, wanted_attributes, source, with_prefix) + for _, key in ipairs(wanted_attributes) do + local is_key_a_match = #key >= 2 and key:byte(-1) == asterisk and with_prefix + + if is_key_a_match then + local prefix = key:sub(0, -2) + for possible_key, value in pairs(source) do + if core.string.has_prefix(possible_key, prefix) then + core.table.insert(attributes, attr.string(possible_key, value)) + end + end + else + local val = source[key] + if val then + core.table.insert(attributes, attr.string(key, val)) + end + end + end +end + + function _M.rewrite(conf, api_ctx) local tracer, err = core.lrucache.plugin_ctx(lrucache, api_ctx, nil, create_tracer_obj, conf) if not tracer then @@ -286,17 +315,22 @@ function _M.rewrite(conf, api_ctx) attr.string("service", api_ctx.service_name), attr.string("route", api_ctx.route_name), } + if conf.additional_attributes then - for _, key in ipairs(conf.additional_attributes) do - local val = api_ctx.var[key] - if val then - core.table.insert(attributes, attr.string(key, val)) - end - end + inject_attributes(attributes, conf.additional_attributes, api_ctx.var, false) + end + + if conf.additional_header_prefix_attributes then + inject_attributes( + attributes, + conf.additional_header_prefix_attributes, + core.request.headers(api_ctx), + true + ) 
end local ctx = tracer:start(upstream_context, api_ctx.var.request_uri, { - kind = span_kind.client, + kind = span_kind.server, attributes = attributes, }) ctx:attach() diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua index c65a39c48ba2..45ff94c3f631 100644 --- a/apisix/plugins/prometheus/exporter.lua +++ b/apisix/plugins/prometheus/exporter.lua @@ -18,6 +18,7 @@ local base_prometheus = require("prometheus") local core = require("apisix.core") local plugin = require("apisix.plugin") local ipairs = ipairs +local pairs = pairs local ngx = ngx local re_gmatch = ngx.re.gmatch local ffi = require("ffi") @@ -38,6 +39,8 @@ local get_protos = require("apisix.plugins.grpc-transcode.proto").protos local service_fetch = require("apisix.http.service").get local latency_details = require("apisix.utils.log-util").latency_details_in_ms local xrpc = require("apisix.stream.xrpc") +local unpack = unpack +local next = next local ngx_capture @@ -64,6 +67,31 @@ local function gen_arr(...) 
return inner_tab_arr end +local extra_labels_tbl = {} + +local function extra_labels(name, ctx) + clear_tab(extra_labels_tbl) + + local attr = plugin.plugin_attr("prometheus") + local metrics = attr.metrics + + if metrics and metrics[name] and metrics[name].extra_labels then + local labels = metrics[name].extra_labels + for _, kv in ipairs(labels) do + local val, v = next(kv) + if ctx then + val = ctx.var[v:sub(2)] + if val == nil then + val = "" + end + end + core.table.insert(extra_labels_tbl, val) + end + end + + return extra_labels_tbl +end + local _M = {} @@ -122,6 +150,14 @@ function _M.http_init(prometheus_enabled_in_stream) "Etcd modify index for APISIX keys", {"key"}) + metrics.shared_dict_capacity_bytes = prometheus:gauge("shared_dict_capacity_bytes", + "The capacity of each nginx shared DICT since APISIX start", + {"name"}) + + metrics.shared_dict_free_space_bytes = prometheus:gauge("shared_dict_free_space_bytes", + "The free space of each nginx shared DICT since APISIX start", + {"name"}) + -- per service -- The consumer label indicates the name of consumer corresponds to the @@ -129,15 +165,17 @@ function _M.http_init(prometheus_enabled_in_stream) -- no consumer in request. 
metrics.status = prometheus:counter("http_status", "HTTP status codes per service in APISIX", - {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node"}) + {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node", + unpack(extra_labels("http_status"))}) metrics.latency = prometheus:histogram("http_latency", "HTTP request latency in milliseconds per service in APISIX", - {"type", "route", "service", "consumer", "node"}, DEFAULT_BUCKETS) + {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))}, + DEFAULT_BUCKETS) metrics.bandwidth = prometheus:counter("bandwidth", "Total bandwidth in bytes consumed per service in APISIX", - {"type", "route", "service", "consumer", "node"}) + {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))}) if prometheus_enabled_in_stream then init_stream_metrics() @@ -199,25 +237,35 @@ function _M.http_log(conf, ctx) metrics.status:inc(1, gen_arr(vars.status, route_id, matched_uri, matched_host, - service_id, consumer_name, balancer_ip)) + service_id, consumer_name, balancer_ip, + unpack(extra_labels("http_status", ctx)))) local latency, upstream_latency, apisix_latency = latency_details(ctx) + local latency_extra_label_values = extra_labels("http_latency", ctx) + metrics.latency:observe(latency, - gen_arr("request", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("request", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) if upstream_latency then metrics.latency:observe(upstream_latency, - gen_arr("upstream", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("upstream", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) end metrics.latency:observe(apisix_latency, - gen_arr("apisix", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("apisix", route_id, service_id, consumer_name, balancer_ip, + 
unpack(latency_extra_label_values))) + + local bandwidth_extra_label_values = extra_labels("bandwidth", ctx) metrics.bandwidth:inc(vars.request_length, - gen_arr("ingress", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("ingress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) metrics.bandwidth:inc(vars.bytes_sent, - gen_arr("egress", route_id, service_id, consumer_name, balancer_ip)) + gen_arr("egress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) end @@ -352,6 +400,16 @@ local function etcd_modify_index() end +local function shared_dict_status() + local name = {} + for shared_dict_name, shared_dict in pairs(ngx.shared) do + name[1] = shared_dict_name + metrics.shared_dict_capacity_bytes:set(shared_dict:capacity(), name) + metrics.shared_dict_free_space_bytes:set(shared_dict:free_space(), name) + end +end + + local function collect(ctx, stream_only) if not prometheus or not metrics then core.log.error("prometheus: plugin is not initialized, please make sure ", @@ -359,6 +417,9 @@ local function collect(ctx, stream_only) return 500, {message = "An unexpected error occurred"} end + -- collect ngx.shared.DICT status + shared_dict_status() + -- across all services nginx_status() diff --git a/apisix/plugins/proxy-rewrite.lua b/apisix/plugins/proxy-rewrite.lua index c1d7ec4f5d54..7b9a99f0b872 100644 --- a/apisix/plugins/proxy-rewrite.lua +++ b/apisix/plugins/proxy-rewrite.lua @@ -68,16 +68,16 @@ local schema = { type = "string", pattern = [[^[0-9a-zA-Z-.]+(:\d{1,5})?$]], }, - scheme = { - description = "new scheme for upstream", - type = "string", - enum = {"http", "https"} - }, headers = { description = "new headers for request", type = "object", minProperties = 1, }, + use_real_request_uri_unsafe = { + description = "use real_request_uri instead, THIS IS VERY UNSAFE.", + type = "boolean", + default = false, + }, }, minProperties = 1, } @@ -156,12 +156,11 @@ function 
_M.rewrite(conf, ctx) ctx.var[upstream_vars[name]] = conf[name] end end - if conf["scheme"] then - ctx.upstream_scheme = conf["scheme"] - end local upstream_uri = ctx.var.uri - if conf.uri ~= nil then + if conf.use_real_request_uri_unsafe then + upstream_uri = ctx.var.real_request_uri + elseif conf.uri ~= nil then upstream_uri = core.utils.resolve_var(conf.uri, ctx.var) elseif conf.regex_uri ~= nil then local uri, _, err = re_sub(ctx.var.uri, conf.regex_uri[1], @@ -177,22 +176,24 @@ function _M.rewrite(conf, ctx) end end - local index = str_find(upstream_uri, "?") - if index then - upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index-1)) .. - sub_str(upstream_uri, index) - else - upstream_uri = core.utils.uri_safe_encode(upstream_uri) - end - - if ctx.var.is_args == "?" then + if not conf.use_real_request_uri_unsafe then + local index = str_find(upstream_uri, "?") if index then - ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") + upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index-1)) .. + sub_str(upstream_uri, index) + else + upstream_uri = core.utils.uri_safe_encode(upstream_uri) + end + + if ctx.var.is_args == "?" then + if index then + ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") + else + ctx.var.upstream_uri = upstream_uri .. "?" .. (ctx.var.args or "") + end else - ctx.var.upstream_uri = upstream_uri .. "?" .. 
(ctx.var.args or "") + ctx.var.upstream_uri = upstream_uri end - else - ctx.var.upstream_uri = upstream_uri end if conf.headers then diff --git a/apisix/plugins/redirect.lua b/apisix/plugins/redirect.lua index 6c9a99a1575c..421007d20d82 100644 --- a/apisix/plugins/redirect.lua +++ b/apisix/plugins/redirect.lua @@ -101,6 +101,7 @@ end function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) + if not ok then return false, err end @@ -115,6 +116,10 @@ function _M.check_schema(conf) end end + if conf.http_to_https and conf.append_query_string then + return false, "only one of `http_to_https` and `append_query_string` can be configured." + end + return true end @@ -161,11 +166,6 @@ local function get_port(attr) return port end - port = ssl["listen_port"] - if port then - return port - end - local ports = ssl["listen"] if ports and #ports > 0 then local idx = math_random(1, #ports) @@ -192,8 +192,6 @@ function _M.rewrite(conf, ctx) local proxy_proto = core.request.header(ctx, "X-Forwarded-Proto") local _scheme = proxy_proto or core.request.get_scheme(ctx) if conf.http_to_https and _scheme == "http" then - -- TODO: add test case - -- PR: https://github.com/apache/apisix/pull/1958 if ret_port == nil or ret_port == 443 or ret_port <= 0 or ret_port > 65535 then uri = "https://$host$request_uri" else diff --git a/apisix/plugins/request-id.lua b/apisix/plugins/request-id.lua index 6f1ab7b0cc9e..353bd3f8322e 100644 --- a/apisix/plugins/request-id.lua +++ b/apisix/plugins/request-id.lua @@ -64,7 +64,7 @@ local attr_schema = { local _M = { version = 0.1, - priority = 11010, + priority = 12015, name = plugin_name, schema = schema } diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua index 9a4015fb98bb..4c3487da0686 100644 --- a/apisix/plugins/response-rewrite.lua +++ b/apisix/plugins/response-rewrite.lua @@ -19,6 +19,7 @@ local expr = require("resty.expr.v1") local re_compile = require("resty.core.regex").re_match_compile 
local plugin_name = "response-rewrite" local ngx = ngx +local re_match = ngx.re.match local re_sub = ngx.re.sub local re_gsub = ngx.re.gsub local pairs = pairs @@ -27,13 +28,63 @@ local type = type local pcall = pcall +local lrucache = core.lrucache.new({ + type = "plugin", +}) + local schema = { type = "object", properties = { headers = { description = "new headers for response", - type = "object", - minProperties = 1, + anyOf = { + { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + { + properties = { + add = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie: =; Max-Age=" + pattern = "^[^:]+:[^:]+[^/]$" + } + }, + set = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + remove = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie" + pattern = "^[^:]+$" + } + }, + }, + } + } }, body = { description = "new body for response", @@ -121,6 +172,33 @@ local function vars_matched(conf, ctx) end +local function is_new_headers_conf(headers) + return + (headers.add and type(headers.add) == "table") or + (headers.set and type(headers.set) == "table") or + (headers.remove and type(headers.remove) == "table") +end + + +local function check_set_headers(headers) + for field, value in pairs(headers) do + if type(field) ~= 'string' then + return false, 'invalid type as header field' + end + + if type(value) ~= 'string' and type(value) ~= 'number' then + return false, 'invalid type as header value' + end + + if #field == 0 then + return false, 'invalid field length in header' + end + end + + return true +end + + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) if not ok then @@ -128,17 +206,10 @@ function _M.check_schema(conf) end if conf.headers then - for field, value in 
pairs(conf.headers) do - if type(field) ~= 'string' then - return false, 'invalid type as header field' - end - - if type(value) ~= 'string' and type(value) ~= 'number' then - return false, 'invalid type as header value' - end - - if #field == 0 then - return false, 'invalid field length in header' + if not is_new_headers_conf(conf.headers) then + ok, err = check_set_headers(conf.headers) + if not ok then + return false, err end end end @@ -205,17 +276,51 @@ function _M.body_filter(conf, ctx) end if conf.body then - + ngx.arg[2] = true if conf.body_base64 then ngx.arg[1] = ngx.decode_base64(conf.body) else ngx.arg[1] = conf.body end + end +end - ngx.arg[2] = true + +local function create_header_operation(hdr_conf) + local set = {} + local add = {} + if is_new_headers_conf(hdr_conf) then + if hdr_conf.add then + for _, value in ipairs(hdr_conf.add) do + local m, err = re_match(value, [[^([^:\s]+)\s*:\s*([^:]+)$]], "jo") + if not m then + return nil, err + end + core.table.insert_tail(add, m[1], m[2]) + end + end + + if hdr_conf.set then + for field, value in pairs(hdr_conf.set) do + --reform header from object into array, so can avoid use pairs, which is NYI + core.table.insert_tail(set, field, value) + end + end + + else + for field, value in pairs(hdr_conf) do + core.table.insert_tail(set, field, value) + end end + + return { + add = add, + set = set, + remove = hdr_conf.remove or {}, + } end + function _M.header_filter(conf, ctx) ctx.response_rewrite_matched = vars_matched(conf, ctx) if not ctx.response_rewrite_matched then @@ -235,19 +340,28 @@ function _M.header_filter(conf, ctx) return end - --reform header from object into array, so can avoid use pairs, which is NYI - if not conf.headers_arr then - conf.headers_arr = {} + local hdr_op, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, + create_header_operation, conf.headers) + if not hdr_op then + core.log.error("failed to create header operation: ", err) + return + end - for field, value in 
pairs(conf.headers) do - core.table.insert_tail(conf.headers_arr, field, value) - end + local field_cnt = #hdr_op.add + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var(hdr_op.add[i+1], ctx.var) + core.response.add_header(hdr_op.add[i], val) end - local field_cnt = #conf.headers_arr + local field_cnt = #hdr_op.set for i = 1, field_cnt, 2 do - local val = core.utils.resolve_var(conf.headers_arr[i+1], ctx.var) - ngx.header[conf.headers_arr[i]] = val + local val = core.utils.resolve_var(hdr_op.set[i+1], ctx.var) + core.response.set_header(hdr_op.set[i], val) + end + + local field_cnt = #hdr_op.remove + for i = 1, field_cnt do + core.response.set_header(hdr_op.remove[i], nil) end end diff --git a/apisix/plugins/server-info.lua b/apisix/plugins/server-info.lua index 055bafa2858c..b7cd67793d75 100644 --- a/apisix/plugins/server-info.lua +++ b/apisix/plugins/server-info.lua @@ -261,6 +261,15 @@ function _M.init() return end + + local local_conf = core.config.local_conf() + local deployment_role = core.table.try_read_attr( + local_conf, "deployment", "role") + if deployment_role == "data_plane" then + -- data_plane should not write to etcd + return + end + local attr = plugin.plugin_attr(plugin_name) local ok, err = core.schema.check(attr_schema, attr) if not ok then diff --git a/apisix/plugins/sls-logger.lua b/apisix/plugins/sls-logger.lua index ed34c847ebe2..290bf11917bb 100644 --- a/apisix/plugins/sls-logger.lua +++ b/apisix/plugins/sls-logger.lua @@ -17,6 +17,9 @@ local core = require("apisix.core") local log_util = require("apisix.utils.log-util") local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local plugin = require("apisix.plugin") + + local plugin_name = "sls-logger" local ngx = ngx local rf5424 = require("apisix.plugins.slslog.rfc5424") @@ -127,10 +130,15 @@ end -- log phase in APISIX function _M.log(conf, ctx) - local entry = log_util.get_full_log(ngx, conf) - if not entry.route_id then - core.log.error("failed to obtain the 
route id for sys logger") - return + local metadata = plugin.plugin_metadata(plugin_name) + local entry + + if metadata and metadata.value.log_format + and core.table.nkeys(metadata.value.log_format) > 0 + then + entry = log_util.get_custom_format_log(ctx, metadata.value.log_format) + else + entry = log_util.get_full_log(ngx, conf) end local json_str, err = core.json.encode(entry) diff --git a/apisix/plugins/syslog.lua b/apisix/plugins/syslog.lua index 7eb4675c0b04..b57f8a1235eb 100644 --- a/apisix/plugins/syslog.lua +++ b/apisix/plugins/syslog.lua @@ -28,8 +28,6 @@ local schema = { properties = { host = {type = "string"}, port = {type = "integer"}, - max_retry_times = {type = "integer", minimum = 1}, - retry_interval = {type = "integer", minimum = 0}, flush_limit = {type = "integer", minimum = 1, default = 4096}, drop_limit = {type = "integer", default = 1048576}, timeout = {type = "integer", minimum = 1, default = 3000}, @@ -59,8 +57,6 @@ function _M.check_schema(conf) return false, err end - conf.max_retry_count = conf.max_retry_times or conf.max_retry_count - conf.retry_delay = conf.retry_interval or conf.retry_delay return true end diff --git a/apisix/plugins/tencent-cloud-cls.lua b/apisix/plugins/tencent-cloud-cls.lua new file mode 100644 index 000000000000..b0726e607eae --- /dev/null +++ b/apisix/plugins/tencent-cloud-cls.lua @@ -0,0 +1,141 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local cls_sdk = require("apisix.plugins.tencent-cloud-cls.cls-sdk") +local plugin = require("apisix.plugin") +local math = math +local ngx = ngx +local pairs = pairs + + +local plugin_name = "tencent-cloud-cls" +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = { + type = "object", + properties = { + cls_host = { type = "string" }, + cls_topic = { type = "string" }, + secret_id = { type = "string" }, + secret_key = { type = "string" }, + sample_ratio = { + type = "number", + minimum = 0.00001, + maximum = 1, + default = 1 + }, + include_req_body = { type = "boolean", default = false }, + include_resp_body = { type = "boolean", default = false }, + global_tag = { type = "object" }, + }, + required = { "cls_host", "cls_topic", "secret_id", "secret_key" } +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = log_util.metadata_schema_log_format, + }, +} + + +local _M = { + version = 0.1, + priority = 397, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, err + end + return log_util.check_log_schema(conf) +end + + +function 
_M.access(conf, ctx) + ctx.cls_sample = false + if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then + core.log.debug("cls sampled") + ctx.cls_sample = true + return + end +end + + +function _M.body_filter(conf, ctx) + if ctx.cls_sample then + log_util.collect_body(conf, ctx) + end +end + + +function _M.log(conf, ctx) + -- sample if set + if not ctx.cls_sample then + core.log.debug("cls not sampled, skip log") + return + end + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + + local entry + + if metadata and metadata.value.log_format + and core.table.nkeys(metadata.value.log_format) > 0 + then + core.log.debug("using custom format log") + entry = log_util.get_custom_format_log(ctx, metadata.value.log_format) + else + entry = log_util.get_full_log(ngx, conf) + end + + if conf.global_tag then + for k, v in pairs(conf.global_tag) do + entry[k] = v + end + end + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + local sdk, err = cls_sdk.new(conf.cls_host, conf.cls_topic, conf.secret_id, conf.secret_key) + if err then + core.log.error("init sdk failed err:", err) + return false, err + end + return sdk:send_to_cls(entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua new file mode 100644 index 000000000000..d2b6e8ad4525 --- /dev/null +++ b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua @@ -0,0 +1,312 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local pb = require "pb" +local protoc = require("protoc").new() +local http = require("resty.http") +local socket = require("socket") +local str_util = require("resty.string") +local core = require("apisix.core") +local core_gethostname = require("apisix.core.utils").gethostname +local json = core.json +local json_encode = json.encode +local ngx = ngx +local ngx_time = ngx.time +local ngx_now = ngx.now +local ngx_sha1_bin = ngx.sha1_bin +local ngx_hmac_sha1 = ngx.hmac_sha1 +local fmt = string.format +local table = table +local concat_tab = table.concat +local clear_tab = table.clear +local new_tab = table.new +local insert_tab = table.insert +local ipairs = ipairs +local pairs = pairs +local type = type +local tostring = tostring +local setmetatable = setmetatable +local pcall = pcall + +-- api doc https://www.tencentcloud.com/document/product/614/16873 +local MAX_SINGLE_VALUE_SIZE = 1 * 1024 * 1024 +local MAX_LOG_GROUP_VALUE_SIZE = 5 * 1024 * 1024 -- 5MB + +local cls_api_path = "/structuredlog" +local auth_expire_time = 60 +local cls_conn_timeout = 1000 +local cls_read_timeout = 10000 +local cls_send_timeout = 10000 + +local headers_cache = {} +local params_cache = { + ssl_verify = false, + headers = headers_cache, +} + + +local function get_ip(hostname) + local _, resolved = socket.dns.toip(hostname) + local ip_list = {} + for _, v in ipairs(resolved.ip) do + insert_tab(ip_list, 
v) + end + return ip_list +end + +local host_ip = tostring(unpack(get_ip(core_gethostname()))) +local log_group_list = {} +local log_group_list_pb = { + logGroupList = log_group_list, +} + + +local function sha1(msg) + return str_util.to_hex(ngx_sha1_bin(msg)) +end + + +local function sha1_hmac(key, msg) + return str_util.to_hex(ngx_hmac_sha1(key, msg)) +end + + +-- sign algorithm https://cloud.tencent.com/document/product/614/12445 +local function sign(secret_id, secret_key) + local method = "post" + local format_params = "" + local format_headers = "" + local sign_algorithm = "sha1" + local http_request_info = fmt("%s\n%s\n%s\n%s\n", + method, cls_api_path, format_params, format_headers) + local cur_time = ngx_time() + local sign_time = fmt("%d;%d", cur_time, cur_time + auth_expire_time) + local string_to_sign = fmt("%s\n%s\n%s\n", sign_algorithm, sign_time, sha1(http_request_info)) + + local sign_key = sha1_hmac(secret_key, sign_time) + local signature = sha1_hmac(sign_key, string_to_sign) + + local arr = { + "q-sign-algorithm=sha1", + "q-ak=" .. secret_id, + "q-sign-time=" .. sign_time, + "q-key-time=" .. sign_time, + "q-header-list=", + "q-url-param-list=", + "q-signature=" .. signature, + } + + return concat_tab(arr, '&') +end + + +-- normalized log data for CLS API +local function normalize_log(log) + local normalized_log = {} + local log_size = 4 -- empty obj alignment + for k, v in pairs(log) do + local v_type = type(v) + local field = { key = k, value = "" } + if v_type == "string" then + field["value"] = v + elseif v_type == "number" then + field["value"] = tostring(v) + elseif v_type == "table" then + field["value"] = json_encode(v) + else + field["value"] = tostring(v) + core.log.warn("unexpected type " .. v_type .. " for field " .. 
k) + end + if #field.value > MAX_SINGLE_VALUE_SIZE then + core.log.warn(field.key, " value size over ", MAX_SINGLE_VALUE_SIZE, " , truncated") + field.value = field.value:sub(1, MAX_SINGLE_VALUE_SIZE) + end + insert_tab(normalized_log, field) + log_size = log_size + #field.key + #field.value + end + return normalized_log, log_size +end + + +local _M = { version = 0.1 } +local mt = { __index = _M } + +local pb_state +local function init_pb_state() + local old_pb_state = pb.state(nil) + protoc.reload() + local cls_sdk_protoc = protoc.new() + -- proto file in https://www.tencentcloud.com/document/product/614/42787 + local ok, err = pcall(cls_sdk_protoc.load, cls_sdk_protoc, [[ +package cls; + +message Log +{ + message Content + { + required string key = 1; // Key of each field group + required string value = 2; // Value of each field group + } + required int64 time = 1; // Unix timestamp + repeated Content contents = 2; // Multiple key-value pairs in one log +} + +message LogTag +{ + required string key = 1; + required string value = 2; +} + +message LogGroup +{ + repeated Log logs = 1; // Log array consisting of multiple logs + optional string contextFlow = 2; // This parameter does not take effect currently + optional string filename = 3; // Log filename + optional string source = 4; // Log source, which is generally the machine IP + repeated LogTag logTags = 5; +} + +message LogGroupList +{ + repeated LogGroup logGroupList = 1; // Log group list +} + ]], "tencent-cloud-cls/cls.proto") + if not ok then + cls_sdk_protoc:reset() + pb.state(old_pb_state) + return "failed to load cls.proto: ".. 
err + end + pb_state = pb.state(old_pb_state) +end + + +function _M.new(host, topic, secret_id, secret_key) + if not pb_state then + local err = init_pb_state() + if err then + return nil, err + end + end + local self = { + host = host, + topic = topic, + secret_id = secret_id, + secret_key = secret_key, + } + return setmetatable(self, mt) +end + + +local function do_request_uri(uri, params) + local client = http:new() + client:set_timeouts(cls_conn_timeout, cls_send_timeout, cls_read_timeout) + local res, err = client:request_uri(uri, params) + client:close() + return res, err +end + + +function _M.send_cls_request(self, pb_obj) + -- recovery of stored pb_store + local old_pb_state = pb.state(pb_state) + local ok, pb_data = pcall(pb.encode, "cls.LogGroupList", pb_obj) + pb_state = pb.state(old_pb_state) + if not ok or not pb_data then + core.log.error("failed to encode LogGroupList, err: ", pb_data) + return false, pb_data + end + + clear_tab(headers_cache) + headers_cache["Host"] = self.host + headers_cache["Content-Type"] = "application/x-protobuf" + headers_cache["Authorization"] = sign(self.secret_id, self.secret_key, cls_api_path) + + -- TODO: support lz4/zstd compress + params_cache.method = "POST" + params_cache.body = pb_data + + local cls_url = "http://" .. self.host .. cls_api_path .. "?topic_id=" .. 
self.topic + core.log.debug("CLS request URL: ", cls_url) + + local res, err = do_request_uri(cls_url, params_cache) + if not res then + return false, err + end + + if res.status ~= 200 then + err = fmt("got wrong status: %s, headers: %s, body, %s", + res.status, json.encode(res.headers), res.body) + -- 413, 404, 401, 403 are not retryable + if res.status == 413 or res.status == 404 or res.status == 401 or res.status == 403 then + core.log.error(err, ", not retryable") + return true + end + + return false, err + end + + core.log.debug("CLS report success") + return true +end + + +function _M.send_to_cls(self, logs) + clear_tab(log_group_list) + local now = ngx_now() * 1000 + + local total_size = 0 + local format_logs = new_tab(#logs, 0) + -- sums of all value in all LogGroup should be no more than 5MB + -- so send whenever size exceed max size + local group_list_start = 1 + for i = 1, #logs, 1 do + local contents, log_size = normalize_log(logs[i]) + if log_size > MAX_LOG_GROUP_VALUE_SIZE then + core.log.error("size of log is over 5MB, dropped") + goto continue + end + total_size = total_size + log_size + if total_size > MAX_LOG_GROUP_VALUE_SIZE then + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + if not ok then + return false, err, group_list_start + end + group_list_start = i + format_logs = new_tab(#logs - i, 0) + total_size = 0 + clear_tab(log_group_list) + end + insert_tab(format_logs, { + time = now, + contents = contents, + }) + :: continue :: + end + + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + return ok, err, group_list_start +end + +return _M diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua index 9ba0997f6f08..38e272b7be66 100644 --- a/apisix/plugins/traffic-split.lua +++ b/apisix/plugins/traffic-split.lua @@ -172,11 +172,7 @@ local function 
set_upstream(upstream_info, ctx) upstream_host = upstream_info.upstream_host, key = upstream_info.key, nodes = new_nodes, - timeout = { - send = upstream_info.timeout and upstream_info.timeout.send or 15, - read = upstream_info.timeout and upstream_info.timeout.read or 15, - connect = upstream_info.timeout and upstream_info.timeout.connect or 15 - } + timeout = upstream_info.timeout, } local ok, err = upstream.check_schema(up_conf) diff --git a/apisix/plugins/wolf-rbac.lua b/apisix/plugins/wolf-rbac.lua index 1a2e9867fce0..a6be6474cd9b 100644 --- a/apisix/plugins/wolf-rbac.lua +++ b/apisix/plugins/wolf-rbac.lua @@ -232,7 +232,7 @@ local function check_url_permission(server, appid, action, resName, client_ip, w } end - if res.status ~= 200 and res.status ~= 401 then + if res.status ~= 200 and res.status >= 500 then return { status = 500, err = 'request to wolf-server failed, status:' .. res.status @@ -314,7 +314,7 @@ function _M.rewrite(conf, ctx) core.response.set_header(prefix .. "UserId", userId) core.response.set_header(prefix .. "Username", username) core.response.set_header(prefix .. "Nickname", ngx.escape_uri(nickname)) - core.request.set_header(ctx, prefix .. "UserId", userId, ctx) + core.request.set_header(ctx, prefix .. "UserId", userId) core.request.set_header(ctx, prefix .. "Username", username) core.request.set_header(ctx, prefix .. 
"Nickname", ngx.escape_uri(nickname)) end @@ -324,9 +324,7 @@ function _M.rewrite(conf, ctx) core.log.error(" check_url_permission(", core.json.delay_encode(perm_item), ") failed, res: ",core.json.delay_encode(res)) - return 401, fail_response("Invalid user permission", - { username = username, nickname = nickname } - ) + return res.status, fail_response(res.err, { username = username, nickname = nickname }) end core.log.info("wolf-rbac check permission passed") end diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua new file mode 100644 index 000000000000..a586a923b9b7 --- /dev/null +++ b/apisix/plugins/workflow.lua @@ -0,0 +1,151 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local limit_count = require("apisix.plugins.limit-count.init") +local expr = require("resty.expr.v1") +local ipairs = ipairs + +local schema = { + type = "object", + properties = { + rules = { + type = "array", + items = { + type = "object", + properties = { + case = { + type = "array", + items = { + type = "array", + }, + minItems = 1, + }, + actions = { + type = "array", + items = { + type = "array", + minItems = 1 + } + } + }, + required = {"case", "actions"} + } + } + } +} + +local plugin_name = "workflow" + +local _M = { + version = 0.1, + priority = 1006, + name = plugin_name, + schema = schema +} + + +local return_schema = { + type = "object", + properties = { + code = { + type = "integer", + minimum = 100, + maximum = 599 + } + }, + required = {"code"} +} + + +local function check_return_schema(conf) + local ok, err = core.schema.check(return_schema, conf) + if not ok then + return false, err + end + return true +end + + +local function exit(conf) + return conf.code, {error_msg = "rejected by workflow"} +end + + +local function rate_limit(conf, ctx) + return limit_count.rate_limit(conf, ctx) +end + + +local support_action = { + ["return"] = { + handler = exit, + check_schema = check_return_schema, + }, + ["limit-count"] = { + handler = rate_limit, + check_schema = limit_count.check_schema, + } +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + for idx, rule in ipairs(conf.rules) do + local ok, err = expr.new(rule.case) + if not ok then + return false, "failed to validate the 'case' expression: " .. err + end + + local actions = rule.actions + for _, action in ipairs(actions) do + + if not support_action[action[1]] then + return false, "unsupported action: " .. 
action[1] + end + + -- use the action's idx as an identifier to isolate between confs + action[2]["_vid"] = idx + local ok, err = support_action[action[1]].check_schema(action[2], plugin_name) + if not ok then + return false, "failed to validate the '" .. action[1] .. "' action: " .. err + end + end + end + + return true +end + + +function _M.access(conf, ctx) + local match_result + for _, rule in ipairs(conf.rules) do + local expr, _ = expr.new(rule.case) + match_result = expr:eval(ctx.var) + if match_result then + -- only one action is currently supported + local action = rule.actions[1] + return support_action[action[1]].handler(action[2], ctx) + end + end +end + + +return _M diff --git a/apisix/plugins/zipkin.lua b/apisix/plugins/zipkin.lua index 3fafd29f0e2a..ab284cefe9fa 100644 --- a/apisix/plugins/zipkin.lua +++ b/apisix/plugins/zipkin.lua @@ -127,6 +127,8 @@ function _M.rewrite(plugin_conf, ctx) local b3 = headers["b3"] if b3 then -- don't pass b3 header by default + -- TODO: add an option like 'single_b3_header' so we can adapt to the upstream + -- which doesn't support b3 header without always breaking down the header core.request.set_header(ctx, "b3", nil) local err @@ -158,6 +160,12 @@ function _M.rewrite(plugin_conf, ctx) ctx.opentracing_sample = tracer.sampler:sample(per_req_sample_ratio or conf.sample_ratio) if not ctx.opentracing_sample then core.request.set_header(ctx, "x-b3-sampled", "0") + -- pass the trace ids even the sample is rejected + -- see https://github.com/openzipkin/b3-propagation#why-send- + -- trace-ids-with-a-reject-sampling-decision + core.request.set_header(ctx, "x-b3-traceid", trace_id) + core.request.set_header(ctx, "x-b3-parentspanid", parent_span_id) + core.request.set_header(ctx, "x-b3-spanid", request_span_id) return end diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua index 7d39b62aad76..59e23542d662 100644 --- a/apisix/schema_def.lua +++ b/apisix/schema_def.lua @@ -277,20 +277,6 @@ local health_checker = { } } }, 
- default = { - type = "http", - healthy = { - http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, - 300, 301, 302, 303, 304, 305, 306, 307, 308 }, - successes = 0, - }, - unhealthy = { - http_statuses = { 429, 500, 503 }, - tcp_failures = 0, - timeouts = 0, - http_failures = 0, - }, - } } }, anyOf = { @@ -610,9 +596,6 @@ _M.route = { service_id = id_schema, upstream_id = id_schema, - service_protocol = { - enum = {"grpc", "http"} - }, enable_websocket = { description = "enable websocket for request", @@ -940,18 +923,27 @@ _M.id_schema = id_schema _M.plugin_injected_schema = { ["$comment"] = "this is a mark for our injected plugin schema", - disable = { - type = "boolean", - }, _meta = { type = "object", properties = { + disable = { + type = "boolean", + }, error_response = { oneOf = { { type = "string" }, { type = "object" }, } }, + priority = { + description = "priority of plugins by customized order", + type = "integer", + }, + filter = { + description = "filter determines whether the plugin ".. 
+ "needs to be executed at runtime", + type = "array", + } } } } diff --git a/apisix/ssl/router/radixtree_sni.lua b/apisix/ssl/router/radixtree_sni.lua index 891d8d21dd4c..28648f8c9b18 100644 --- a/apisix/ssl/router/radixtree_sni.lua +++ b/apisix/ssl/router/radixtree_sni.lua @@ -247,7 +247,7 @@ end function _M.init_worker() local err - ssl_certificates, err = core.config.new("/ssl", { + ssl_certificates, err = core.config.new("/ssls", { automatic = true, item_schema = core.schema.ssl, checker = function (item, schema_type) @@ -264,7 +264,7 @@ end function _M.get_by_id(ssl_id) local ssl - local ssls = core.config.fetch_created_obj("/ssl") + local ssls = core.config.fetch_created_obj("/ssls") if ssls then ssl = ssls:get(tostring(ssl_id)) end diff --git a/apisix/stream/plugins/mqtt-proxy.lua b/apisix/stream/plugins/mqtt-proxy.lua index 2c421dcc2c49..f075e204db95 100644 --- a/apisix/stream/plugins/mqtt-proxy.lua +++ b/apisix/stream/plugins/mqtt-proxy.lua @@ -15,8 +15,6 @@ -- limitations under the License. -- local core = require("apisix.core") -local upstream = require("apisix.upstream") -local ipmatcher = require("resty.ipmatcher") local bit = require("bit") local ngx = ngx local str_byte = string.byte @@ -32,20 +30,7 @@ local schema = { type = "object", properties = { protocol_name = {type = "string"}, - protocol_level = {type = "integer"}, - upstream = { - description = "Deprecated. 
We should configure upstream outside of the plugin", - type = "object", - properties = { - ip = {type = "string"}, -- deprecated, use "host" instead - host = {type = "string"}, - port = {type = "number"}, - }, - oneOf = { - {required = {"host", "port"}}, - {required = {"ip", "port"}}, - }, - } + protocol_level = {type = "integer"} }, required = {"protocol_name", "protocol_level"}, } @@ -189,48 +174,6 @@ function _M.preread(conf, ctx) if res.client_id ~= "" then ctx.mqtt_client_id = res.client_id end - - if not conf.upstream then - return - end - - local host = conf.upstream.host - if not host then - host = conf.upstream.ip - end - - if conf.host_is_domain == nil then - conf.host_is_domain = not ipmatcher.parse_ipv4(host) - and not ipmatcher.parse_ipv6(host) - end - - if conf.host_is_domain then - local ip, err = core.resolver.parse_domain(host) - if not ip then - core.log.error("failed to parse host ", host, ", err: ", err) - return 503 - end - - host = ip - end - - local up_conf = { - type = "roundrobin", - nodes = { - {host = host, port = conf.upstream.port, weight = 1}, - } - } - - local ok, err = upstream.check_schema(up_conf) - if not ok then - core.log.error("failed to check schema ", core.json.delay_encode(up_conf), - ", err: ", err) - return 503 - end - - local matched_route = ctx.matched_route - upstream.set(ctx, up_conf.type .. "#route_" .. 
matched_route.value.id, - ctx.conf_version, up_conf) return end diff --git a/apisix/upstream.lua b/apisix/upstream.lua index 0162ad8137ed..a2a0cd3e899a 100644 --- a/apisix/upstream.lua +++ b/apisix/upstream.lua @@ -19,7 +19,6 @@ local core = require("apisix.core") local discovery = require("apisix.discovery.init").discovery local upstream_util = require("apisix.utils.upstream") local apisix_ssl = require("apisix.ssl") -local balancer = require("ngx.balancer") local error = error local tostring = tostring local ipairs = ipairs @@ -430,7 +429,7 @@ local function check_upstream_conf(in_dp, conf) local ssl_id = conf.tls and conf.tls.client_cert_id if ssl_id then - local key = "/ssl/" .. ssl_id + local key = "/ssls/" .. ssl_id local res, err = core.etcd.get(key) if not res then return nil, "failed to fetch ssl info by " @@ -458,12 +457,6 @@ local function check_upstream_conf(in_dp, conf) end if is_http then - if conf.pass_host == "node" and conf.nodes and - not balancer.recreate_request and core.table.nkeys(conf.nodes) ~= 1 - then - return false, "only support single node for `node` mode currently" - end - if conf.pass_host == "rewrite" and (conf.upstream_host == nil or conf.upstream_host == "") then diff --git a/benchmark/run.sh b/benchmark/run.sh index 8bb1047fba17..570d8e8fb7d9 100755 --- a/benchmark/run.sh +++ b/benchmark/run.sh @@ -86,7 +86,7 @@ sleep 3 ############################################# echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + no plugin" -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { @@ -112,7 +112,7 @@ sleep 1 ############################################# echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + 2 plugins (limit-count + prometheus)" -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { diff --git a/bin/apisix b/bin/apisix index 4583fd1b52a0..780764ae9509 100755 --- a/bin/apisix +++ b/bin/apisix @@ -42,10 +42,6 @@ if [[ -e $OR_EXEC && "$OR_VER" -ge 119 ]]; then # use the luajit of openresty echo "$LUAJIT_BIN $APISIX_LUA $*" exec $LUAJIT_BIN $APISIX_LUA $* -elif [[ "$LUA_VERSION" =~ "Lua 5.1" ]]; then - # OpenResty version is < 1.19, use Lua 5.1 by default - echo "lua $APISIX_LUA $*" - exec lua $APISIX_LUA $* else - echo "ERROR: Please check the version of OpenResty and Lua, OpenResty 1.19+ + LuaJIT or OpenResty before 1.19 + Lua 5.1 is required for Apache APISIX." + echo "ERROR: Please check the version of OpenResty and Lua, OpenResty 1.19+ + LuaJIT is required for Apache APISIX." fi diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh index 0f066e6c1520..543e54514be5 100755 --- a/ci/centos7-ci.sh +++ b/ci/centos7-ci.sh @@ -35,10 +35,7 @@ install_dependencies() { ./utils/linux-install-luarocks.sh # install etcdctl - wget https://github.com/etcd-io/etcd/releases/download/v3.4.18/etcd-v3.4.18-linux-amd64.tar.gz - tar xf etcd-v3.4.18-linux-amd64.tar.gz - cp ./etcd-v3.4.18-linux-amd64/etcdctl /usr/local/bin/ - rm -rf etcd-v3.4.18-linux-amd64 + ./utils/linux-install-etcd-client.sh # install vault cli capabilities install_vault_cli diff --git a/ci/init-last-test-service.sh b/ci/init-last-test-service.sh new file mode 100755 index 000000000000..f49d4a747528 --- /dev/null +++ b/ci/init-last-test-service.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2 +docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3 +docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4 +docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test-consumer + +# create messages for test-consumer +for i in `seq 30` +do + docker exec -i apache-apisix_kafka-server1_1 bash -c "echo "testmsg$i" | /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-consumer" + echo "Produces messages to the test-consumer topic, msg: testmsg$i" +done +echo "Kafka service initialization completed" diff --git a/ci/linux-ci-init-service.sh b/ci/init-plugin-test-service.sh similarity index 85% rename from ci/linux-ci-init-service.sh rename to ci/init-plugin-test-service.sh index 73477a5febca..5f468502304d 100755 --- a/ci/linux-ci-init-service.sh +++ b/ci/init-plugin-test-service.sh @@ -19,15 +19,6 @@ docker exec -i 
apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2 docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3 docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4 -docker exec -i apache-apisix_kafka-server1_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test-consumer - -# create messages for test-consumer -for i in `seq 30` -do - docker exec -i apache-apisix_kafka-server1_1 bash -c "echo "testmsg$i" | /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-consumer" - echo "Produces messages to the test-consumer topic, msg: testmsg$i" -done -echo "Kafka service initialization completed" # prepare openwhisk env docker pull openwhisk/action-nodejs-v14:nightly diff --git a/ci/linux_openresty_1_17_runner.sh b/ci/linux_openresty_1_19_runner.sh similarity index 96% rename from ci/linux_openresty_1_17_runner.sh rename to ci/linux_openresty_1_19_runner.sh index b0cbde775e2d..ed1751308926 100755 --- a/ci/linux_openresty_1_17_runner.sh +++ b/ci/linux_openresty_1_19_runner.sh @@ -17,5 +17,5 @@ # -export OPENRESTY_VERSION=1.17.8.2 +export OPENRESTY_VERSION=1.19.3.2 . 
./ci/linux_openresty_common_runner.sh diff --git a/ci/pod/docker-compose.common.yml b/ci/pod/docker-compose.common.yml index ecbdfcaf0a47..9e0394a48bd2 100644 --- a/ci/pod/docker-compose.common.yml +++ b/ci/pod/docker-compose.common.yml @@ -31,7 +31,7 @@ services: - "3380:2380" etcd: - image: bitnami/etcd:3.4.18 + image: bitnami/etcd:3.5.4 restart: unless-stopped env_file: - ci/pod/etcd/env/common.env @@ -42,7 +42,7 @@ services: - "2380:2380" etcd_tls: - image: bitnami/etcd:3.4.18 + image: bitnami/etcd:3.5.4 restart: unless-stopped env_file: - ci/pod/etcd/env/common.env @@ -58,7 +58,7 @@ services: - ./t/certs:/certs etcd_mtls: - image: bitnami/etcd:3.4.18 + image: bitnami/etcd:3.5.4 restart: unless-stopped env_file: - ci/pod/etcd/env/common.env diff --git a/ci/pod/docker-compose.yml b/ci/pod/docker-compose.first.yml similarity index 55% rename from ci/pod/docker-compose.yml rename to ci/pod/docker-compose.first.yml index 68dab85c539b..a13ad3cf1586 100644 --- a/ci/pod/docker-compose.yml +++ b/ci/pod/docker-compose.first.yml @@ -18,95 +18,6 @@ version: "3.8" services: - ## Redis - apisix_redis: - # The latest image is the latest stable version - image: redis:latest - restart: unless-stopped - ports: - - "6379:6379" - networks: - apisix_net: - - - ## keycloak - apisix_keycloak: - image: sshniro/keycloak-apisix:1.0.0 - environment: - KEYCLOAK_USER: admin - KEYCLOAK_PASSWORD: 123456 - restart: unless-stopped - ports: - - "8090:8080" - - "8443:8443" - networks: - apisix_net: - - - ## kafka-cluster - zookeeper-server1: - image: bitnami/zookeeper:3.6.0 - env_file: - - ci/pod/kafka/zookeeper-server/env/common.env - restart: unless-stopped - ports: - - "2181:2181" - networks: - kafka_net: - - zookeeper-server2: - image: bitnami/zookeeper:3.6.0 - env_file: - - ci/pod/kafka/zookeeper-server/env/common.env - restart: unless-stopped - ports: - - "12181:12181" - networks: - kafka_net: - - kafka-server1: - image: bitnami/kafka:2.8.1 - env_file: - - 
ci/pod/kafka/kafka-server/env/common.env - environment: - KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181 - restart: unless-stopped - ports: - - "9092:9092" - - "9093:9093" - - "9094:9094" - depends_on: - - zookeeper-server1 - - zookeeper-server2 - networks: - kafka_net: - volumes: - - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro - - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro - - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro - - kafka-server2: - image: bitnami/kafka:2.8.1 - env_file: - - ci/pod/kafka/kafka-server/env/common.env - environment: - KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181 - restart: unless-stopped - ports: - - "19092:9092" - - "19093:9093" - - "19094:9094" - depends_on: - - zookeeper-server1 - - zookeeper-server2 - networks: - kafka_net: - volumes: - - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro - - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro - - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro - - ## Eureka eureka: image: bitinit/eureka @@ -116,19 +27,6 @@ services: ports: - "8761:8761" - - ## SkyWalking - skywalking: - image: apache/skywalking-oap-server:8.7.0-es6 - restart: unless-stopped - ports: - - "1234:1234" - - "11800:11800" - - "12800:12800" - networks: - skywalk_net: - - ## Consul consul_1: image: consul:1.7 @@ -148,37 +46,6 @@ services: networks: consul_net: - - ## HashiCorp Vault - vault: - image: vault:1.9.0 - container_name: vault - restart: unless-stopped - ports: - - "8200:8200" - cap_add: - - IPC_LOCK - environment: - VAULT_DEV_ROOT_TOKEN_ID: root - VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200 - command: [ "vault", "server", "-dev" ] - networks: - vault_net: - - - ## OpenLDAP - openldap: - image: bitnami/openldap:2.5.8 - 
environment: - LDAP_ADMIN_USERNAME: amdin - LDAP_ADMIN_PASSWORD: adminpassword - LDAP_USERS: user01,user02 - LDAP_PASSWORDS: password1,password2 - ports: - - "1389:1389" - - "1636:1636" - - ## Nacos cluster nacos_auth: hostname: nacos1 @@ -368,69 +235,7 @@ services: networks: nacos_net: - rocketmq_namesrv: - image: apacherocketmq/rocketmq:4.6.0 - container_name: rmqnamesrv - restart: unless-stopped - ports: - - "9876:9876" - command: sh mqnamesrv - networks: - rocketmq_net: - - rocketmq_broker: - image: apacherocketmq/rocketmq:4.6.0 - container_name: rmqbroker - restart: unless-stopped - ports: - - "10909:10909" - - "10911:10911" - - "10912:10912" - depends_on: - - rocketmq_namesrv - command: sh mqbroker -n rocketmq_namesrv:9876 -c ../conf/broker.conf - networks: - rocketmq_net: - - # Open Policy Agent - opa: - image: openpolicyagent/opa:0.35.0 - restart: unless-stopped - ports: - - 8181:8181 - command: run -s /example.rego /echo.rego /data.json - volumes: - - type: bind - source: ./ci/pod/opa/example.rego - target: /example.rego - - type: bind - source: ./ci/pod/opa/echo.rego - target: /echo.rego - - type: bind - source: ./ci/pod/opa/data.json - target: /data.json - networks: - opa_net: - - # Splunk HEC Logging Service - splunk: - image: splunk/splunk:8.2.3 - restart: unless-stopped - ports: - - "18088:8088" - environment: - SPLUNK_PASSWORD: "ApacheAPISIX@666" - SPLUNK_START_ARGS: "--accept-license" - SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C" - SPLUNK_HEC_SSL: "False" - networks: - apisix_net: consul_net: - kafka_net: nacos_net: - skywalk_net: - rocketmq_net: - vault_net: - opa_net: diff --git a/ci/pod/docker-compose.last.yml b/ci/pod/docker-compose.last.yml new file mode 100644 index 000000000000..dbc835fdeaf7 --- /dev/null +++ b/ci/pod/docker-compose.last.yml @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +version: "3.8" + +services: + ## Redis + apisix_redis: + # The latest image is the latest stable version + image: redis:latest + restart: unless-stopped + ports: + - "6379:6379" + networks: + apisix_net: + + ## kafka-cluster + zookeeper-server1: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "2181:2181" + networks: + kafka_net: + + zookeeper-server2: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "12181:12181" + networks: + kafka_net: + + kafka-server1: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/last.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181 + restart: unless-stopped + ports: + - "9092:9092" + - "9093:9093" + - "9094:9094" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + volumes: + - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro + + kafka-server2: + image: bitnami/kafka:2.8.1 + 
env_file: + - ci/pod/kafka/kafka-server/env/last.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181 + restart: unless-stopped + ports: + - "19092:9092" + - "19093:9093" + - "19094:9094" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + volumes: + - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro + + +networks: + apisix_net: + kafka_net: diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml new file mode 100644 index 000000000000..18d59a042433 --- /dev/null +++ b/ci/pod/docker-compose.plugin.yml @@ -0,0 +1,263 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +version: "3.8" + +services: + ## Redis + apisix_redis: + # The latest image is the latest stable version + image: redis:latest + restart: unless-stopped + ports: + - "6379:6379" + networks: + apisix_net: + + + ## keycloak + apisix_keycloak: + image: sshniro/keycloak-apisix:1.0.0 + environment: + KEYCLOAK_USER: admin + KEYCLOAK_PASSWORD: 123456 + restart: unless-stopped + ports: + - "8090:8080" + - "8443:8443" + networks: + apisix_net: + + + ## kafka-cluster + zookeeper-server1: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "2181:2181" + networks: + kafka_net: + + zookeeper-server2: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "12181:12181" + networks: + kafka_net: + + kafka-server1: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/common.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181 + restart: unless-stopped + ports: + - "9092:9092" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + + kafka-server2: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/common.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181 + restart: unless-stopped + ports: + - "19092:9092" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + + ## SkyWalking + skywalking: + image: apache/skywalking-oap-server:8.7.0-es6 + restart: unless-stopped + ports: + - "1234:1234" + - "11800:11800" + - "12800:12800" + networks: + skywalk_net: + + ## HashiCorp Vault + vault: + image: vault:1.9.0 + container_name: vault + restart: unless-stopped + ports: + - "8200:8200" + cap_add: + - IPC_LOCK + environment: + VAULT_DEV_ROOT_TOKEN_ID: root + VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200 + command: [ "vault", "server", "-dev" ] + networks: + vault_net: + + + ## OpenLDAP + 
openldap: + image: bitnami/openldap:2.5.8 + environment: + - LDAP_ADMIN_USERNAME=amdin + - LDAP_ADMIN_PASSWORD=adminpassword + - LDAP_USERS=user01,user02 + - LDAP_PASSWORDS=password1,password2 + - LDAP_ENABLE_TLS=yes + - LDAP_TLS_CERT_FILE=/certs/localhost_slapd_cert.pem + - LDAP_TLS_KEY_FILE=/certs/localhost_slapd_key.pem + - LDAP_TLS_CA_FILE=/certs/apisix.crt + ports: + - "1389:1389" + - "1636:1636" + volumes: + - ./t/certs:/certs + + + rocketmq_namesrv: + image: apacherocketmq/rocketmq:4.6.0 + container_name: rmqnamesrv + restart: unless-stopped + ports: + - "9876:9876" + command: sh mqnamesrv + networks: + rocketmq_net: + + rocketmq_broker: + image: apacherocketmq/rocketmq:4.6.0 + container_name: rmqbroker + restart: unless-stopped + ports: + - "10909:10909" + - "10911:10911" + - "10912:10912" + depends_on: + - rocketmq_namesrv + command: sh mqbroker -n rocketmq_namesrv:9876 -c ../conf/broker.conf + networks: + rocketmq_net: + + # Open Policy Agent + opa: + image: openpolicyagent/opa:0.35.0 + restart: unless-stopped + ports: + - 8181:8181 + command: run -s /example.rego /echo.rego /data.json + volumes: + - type: bind + source: ./ci/pod/opa/example.rego + target: /example.rego + - type: bind + source: ./ci/pod/opa/echo.rego + target: /echo.rego + - type: bind + source: ./ci/pod/opa/data.json + target: /data.json + networks: + opa_net: + + # Splunk HEC Logging Service + splunk: + image: splunk/splunk:8.2.3 + restart: unless-stopped + ports: + - "18088:8088" + environment: + SPLUNK_PASSWORD: "ApacheAPISIX@666" + SPLUNK_START_ARGS: "--accept-license" + SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C" + SPLUNK_HEC_SSL: "False" + + # Elasticsearch Logger Service + elasticsearch-noauth: + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1 + restart: unless-stopped + ports: + - "9200:9200" + - "9300:9300" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + xpack.security.enabled: 'false' + + elasticsearch-auth: + image: 
docker.elastic.co/elasticsearch/elasticsearch:7.17.1 + restart: unless-stopped + ports: + - "9201:9201" + - "9301:9301" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + transport.tcp.port: 9301 + xpack.security.enabled: 'true' + + + # The function services of OpenFunction + test-header: + image: test-header-image:latest + restart: unless-stopped + ports: + - "30583:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-uri: + image: test-uri-image:latest + restart: unless-stopped + ports: + - "30584:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-body: + image: test-body-image:latest + restart: unless-stopped + ports: + - "30585:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + +networks: + apisix_net: + kafka_net: + skywalk_net: + rocketmq_net: + vault_net: + opa_net: diff --git a/ci/pod/kafka/kafka-server/env/common.env b/ci/pod/kafka/kafka-server/env/common.env index adc9d7cad1f8..06200b9b0042 100644 --- a/ci/pod/kafka/kafka-server/env/common.env +++ b/ci/pod/kafka/kafka-server/env/common.env @@ -1,8 +1,3 @@ ALLOW_PLAINTEXT_LISTENER=yes -KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false -KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094 -KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094 -KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM= -KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks -KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit -KAFKA_CFG_SSL_KEY_PASSWORD=changeit 
+KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true +KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 diff --git a/ci/pod/kafka/kafka-server/env/last.env b/ci/pod/kafka/kafka-server/env/last.env new file mode 100644 index 000000000000..adc9d7cad1f8 --- /dev/null +++ b/ci/pod/kafka/kafka-server/env/last.env @@ -0,0 +1,8 @@ +ALLOW_PLAINTEXT_LISTENER=yes +KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false +KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094 +KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094 +KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM= +KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks +KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit +KAFKA_CFG_SSL_KEY_PASSWORD=changeit diff --git a/ci/pod/nacos/service/Dockerfile b/ci/pod/nacos/service/Dockerfile index f76ba1585411..d279c74972cc 100644 --- a/ci/pod/nacos/service/Dockerfile +++ b/ci/pod/nacos/service/Dockerfile @@ -15,7 +15,7 @@ # limitations under the License. # -FROM java +FROM eclipse-temurin:8 ENV SUFFIX_NUM=${SUFFIX_NUM:-1} ENV NACOS_ADDR=${NACOS_ADDR:-127.0.0.1:8848} diff --git a/ci/pod/openfunction/build-function-image.sh b/ci/pod/openfunction/build-function-image.sh new file mode 100644 index 000000000000..3ad08447d090 --- /dev/null +++ b/ci/pod/openfunction/build-function-image.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +wget https://github.com/buildpacks/pack/releases/download/v0.27.0/pack-v0.27.0-linux.tgz +tar -zxvf pack-v0.27.0-linux.tgz + +# please update function-example/*/hello.go if you want to update function +./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" +./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" +./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod new file mode 100644 index 000000000000..6242ced93022 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-body/go.mod @@ -0,0 +1,5 @@ +module example.com/hello + +go 1.16 + +require github.com/OpenFunction/functions-framework-go v0.3.0 diff --git a/ci/pod/openfunction/function-example/test-body/hello.go b/ci/pod/openfunction/function-example/test-body/hello.go new file mode 100644 index 000000000000..ffa7fad6bd7e --- /dev/null +++ b/ci/pod/openfunction/function-example/test-body/hello.go @@ -0,0 +1,36 @@ +/* + * Copyright 2022 The OpenFunction 
Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + "net/http" + "io/ioutil" + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + body,_ := ioutil.ReadAll(r.Body) + fmt.Fprintf(w, "Hello, %s!\n", string(body)) +} diff --git a/ci/pod/openfunction/function-example/test-header/go.mod b/ci/pod/openfunction/function-example/test-header/go.mod new file mode 100644 index 000000000000..32c2cadc95a8 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-header/go.mod @@ -0,0 +1,3 @@ +module example.com/hello + +go 1.16 diff --git a/ci/pod/openfunction/function-example/test-header/hello.go b/ci/pod/openfunction/function-example/test-header/hello.go new file mode 100644 index 000000000000..418f9fb80943 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-header/hello.go @@ -0,0 +1,30 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + "net/http" +) + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + header := r.Header + fmt.Fprintf(w, "%s", header["Authorization"]) +} diff --git a/ci/pod/openfunction/function-example/test-uri/go.mod b/ci/pod/openfunction/function-example/test-uri/go.mod new file mode 100644 index 000000000000..c259999831bd --- /dev/null +++ b/ci/pod/openfunction/function-example/test-uri/go.mod @@ -0,0 +1,5 @@ +module example.com/hello + +go 1.17 + +require github.com/OpenFunction/functions-framework-go v0.4.0 diff --git a/ci/pod/openfunction/function-example/test-uri/hello.go b/ci/pod/openfunction/function-example/test-uri/hello.go new file mode 100644 index 000000000000..d726b8e59457 --- /dev/null +++ b/ci/pod/openfunction/function-example/test-uri/hello.go @@ -0,0 +1,38 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hello + +import ( + "fmt" + ofctx "github.com/OpenFunction/functions-framework-go/context" + "net/http" + + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld, + functions.WithFunctionPath("/{greeting}")) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + vars := ofctx.VarsFromCtx(r.Context()) + fmt.Fprintf(w, "Hello, %s!\n", vars["greeting"]) +} diff --git a/conf/config-default.yaml b/conf/config-default.yaml old mode 100644 new mode 100755 index c33e4a731231..96a0e692b42f --- a/conf/config-default.yaml +++ b/conf/config-default.yaml @@ -38,8 +38,8 @@ apisix: # yaml: fetch the config value from local yaml file `/your_path/conf/apisix.yaml` #proxy_protocol: # Proxy Protocol configuration - #listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and port_admin. - # This port can only receive http request with proxy protocol, but node_listen & port_admin + #listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and admin_listen. + # This port can only receive http request with proxy protocol, but node_listen & admin_listen # can only receive http request. 
If you enable proxy protocol, you must use this port to # receive http request with proxy protocol #listen_https_port: 9182 # The port with proxy protocol for https @@ -74,17 +74,19 @@ apisix: allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default. #- "::/64" - #admin_listen: # use a separate port - # ip: 127.0.0.1 # Specific IP, if not set, the default value is `0.0.0.0`. - # port: 9180 - #port_admin: 9180 # Not recommend: This parameter should be set via the `admin_listen`. + admin_listen: # use a separate port + ip: 0.0.0.0 # Specific IP, if not set, the default value is `0.0.0.0`. + port: 9180 # Specific port, which must be different from node_listen's port. + #https_admin: true # enable HTTPS when use a separate port for Admin API. # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate. - admin_api_mtls: # Depends on `port_admin` and `https_admin`. + admin_api_mtls: # Depends on `admin_listen` and `https_admin`. admin_ssl_cert: "" # Path of your self-signed server side cert. admin_ssl_cert_key: "" # Path of your self-signed server side key. admin_ssl_ca_cert: "" # Path of your self-signed ca cert.The CA is used to sign all admin api callers' certificates. + admin_api_version: v3 # The version of admin api, latest version is v3. + # Default token when use API to call for Admin API. # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API. # Disabling this configuration item means that the Admin API does not @@ -131,14 +133,11 @@ apisix: ssl: enable: true listen: # APISIX listening port in https. - - 9443 - # - port: 9444 - # enable_http2: true # If not set, the default value is `false`. + - port: 9443 + enable_http2: true # - ip: 127.0.0.3 # Specific IP, If not set, the default value is `0.0.0.0`. 
# port: 9445 # enable_http2: true - enable_http2: true # Not recommend: This parameter should be set via the `listen`. - # listen_port: 9443 # Not recommend: This parameter should be set via the `listen`. #ssl_trusted_certificate: /path/to/ca-cert # Specifies a file path with trusted CA certificates in the PEM format # used to verify the certificate when APISIX needs to do SSL/TLS handshaking # with external services (e.g. etcd) @@ -276,27 +275,6 @@ nginx_config: # config for render the template to generate n kubernetes: 1m tars: 1m -etcd: - host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. - - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, - # e.g. https://127.0.0.1:2379. - prefix: /apisix # apisix configurations prefix - timeout: 30 # 30 seconds - #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter - #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds - health_check_retry: 2 # etcd retry time that only affects the health check, default 2 - #user: root # root username for etcd - #password: 5tHkHhYkjr6cQY # root password for etcd - tls: - # To enable etcd client certificate you need to build APISIX-Base, see - # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment? - #cert: /path/to/cert # path of certificate used by the etcd client - #key: /path/to/key # path of key used by the etcd client - - verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd, - # the default value is true, e.g. the certificate will be verified strictly. - #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used. - # HashiCorp Vault storage backend for sensitive data retrieval. 
The config shows an example of what APISIX expects if you # wish to integrate Vault for secret (sensetive string, public private keys etc.) retrieval. APISIX communicates with Vault # server HTTP APIs. By default, APISIX doesn't need this configuration. @@ -324,6 +302,70 @@ etcd: # connect: 2000 # default 2000ms # send: 2000 # default 2000ms # read: 5000 # default 5000ms +# nacos: +# host: +# - "http://${username}:${password}@${host1}:${port1}" +# prefix: "/nacos/v1/" +# fetch_interval: 30 # default 30 sec +# weight: 100 # default 100 +# timeout: +# connect: 2000 # default 2000 ms +# send: 2000 # default 2000 ms +# read: 5000 # default 5000 ms +# consul_kv: +# servers: +# - "http://127.0.0.1:8500" +# - "http://127.0.0.1:8600" +# prefix: "upstreams" +# skip_keys: # if you need to skip special keys +# - "upstreams/unused_api/" +# timeout: +# connect: 2000 # default 2000 ms +# read: 2000 # default 2000 ms +# wait: 60 # default 60 sec +# weight: 1 # default 1 +# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way +# keepalive: true # default true, use the long pull way to query consul servers +# default_server: # you can define default server when missing hit +# host: "127.0.0.1" +# port: 20999 +# metadata: +# fail_timeout: 1 # default 1 ms +# weight: 1 # default 1 +# max_fails: 1 # default 1 +# dump: # if you need, when registered nodes updated can dump into file +# path: "logs/consul_kv.dump" +# expire: 2592000 # unit sec, here is 30 day +# kubernetes: +# service: +# schema: https #apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST} +# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT} +# client: +# # serviceaccount token or path of serviceaccount token_file +# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} +# # token: |- +# # 
eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif +# # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI +# # kubernetes discovery plugin support use namespace_selector +# # you can use one of [equal, not_equal, match, not_match] filter namespace +# namespace_selector: +# # only save endpoints with namespace equal default +# equal: default +# # only save endpoints with namespace not equal default +# #not_equal: default +# # only save endpoints with namespace match one of [default, ^my-[a-z]+$] +# #match: +# #- default +# #- ^my-[a-z]+$ +# # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] +# #not_match: +# #- default +# #- ^my-[a-z]+$ +# # kubernetes discovery plugin support use label_selector +# # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +# label_selector: |- +# first="a",second="b" graphql: max_size: 1048576 # the maximum size limitation of graphql in bytes, default 1MiB @@ -335,11 +377,11 @@ plugins: # plugin list (sorted by priority) - real-ip # priority: 23000 - client-control # priority: 22000 - proxy-control # priority: 21990 + - request-id # priority: 12015 - zipkin # priority: 12011 #- skywalking # priority: 12010 #- opentelemetry # priority: 12009 - ext-plugin-pre-req # priority: 12000 - - request-id # priority: 11010 - fault-injection # priority: 11000 - mocking # priority: 10900 - serverless-pre-function # priority: 10000 @@ -368,6 +410,7 @@ plugins: # plugin list (sorted by priority) - proxy-mirror # priority: 1010 - proxy-cache # priority: 1009 - proxy-rewrite # priority: 1008 + - workflow # priority: 1006 - api-breaker # priority: 1005 - limit-conn # priority: 1003 - limit-count # priority: 1002 @@ -385,6 +428,7 @@ plugins: # plugin list (sorted by priority) - public-api # priority: 501 - prometheus # priority: 500 - datadog # priority: 495 + - 
elasticsearch-logger # priority: 413 - echo # priority: 412 - loggly # priority: 411 - http-logger # priority: 410 @@ -399,12 +443,14 @@ plugins: # plugin list (sorted by priority) - udp-logger # priority: 400 - file-logger # priority: 399 - clickhouse-logger # priority: 398 + - tencent-cloud-cls # priority: 397 #- log-rotate # priority: 100 # <- recommend to use priority (0, 100) for your custom plugins - example-plugin # priority: 0 - aws-lambda # priority: -1899 - azure-functions # priority: -1900 - openwhisk # priority: -1901 + - openfunction # priority: -1902 - serverless-post-function # priority: -2000 - ext-plugin-post-req # priority: -3000 - ext-plugin-post-resp # priority: -4000 @@ -431,6 +477,7 @@ plugin_attr: log-rotate: interval: 3600 # rotate interval (unit: second) max_kept: 168 # max number of log files will be kept + max_size: -1 # max size bytes of log files to be rotated, size check would be skipped with a value less than 0 enable_compression: false # enable log file compression(gzip) or not, default false skywalking: service_name: APISIX @@ -458,6 +505,20 @@ plugin_attr: export_addr: ip: 127.0.0.1 port: 9091 + #metrics: + # http_status: + # # extra labels from nginx variables + # extra_labels: + # # the label name doesn't need to be the same as variable name + # # below labels are only examples, you could add any valid variables as you need + # - upstream_addr: $upstream_addr + # - upstream_status: $upstream_status + # http_latency: + # extra_labels: + # - upstream_addr: $upstream_addr + # bandwidth: + # extra_labels: + # - upstream_addr: $upstream_addr server-info: report_ttl: 60 # live time for server info in etcd (unit: second) dubbo-proxy: @@ -478,13 +539,27 @@ plugin_attr: # redirect: # https_port: 8443 # the default port for use by HTTP redirects to HTTPS -#deployment: -# role: traditional -# role_traditional: -# config_provider: etcd -# etcd: -# host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. 
-# - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, -# # e.g. https://127.0.0.1:2379. -# prefix: /apisix # configuration prefix in etcd -# timeout: 30 # 30 seconds +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. + - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, + # e.g. https://127.0.0.1:2379. + prefix: /apisix # configuration prefix in etcd + timeout: 30 # 30 seconds + #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter + #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds + startup_retry: 2 # the number of retry to etcd during the startup, default to 2 + #user: root # root username for etcd + #password: 5tHkHhYkjr6cQY # root password for etcd + tls: + # To enable etcd client certificate you need to build APISIX-Base, see + # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment + #cert: /path/to/cert # path of certificate used by the etcd client + #key: /path/to/key # path of key used by the etcd client + + verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd, + # the default value is true, e.g. the certificate will be verified strictly. + #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used. diff --git a/conf/config.yaml b/conf/config.yaml index 421ac0912aa6..6a5f56205a36 100644 --- a/conf/config.yaml +++ b/conf/config.yaml @@ -17,13 +17,21 @@ # If you want to set the specified configuration value, you can set the new # in this file. 
For example if you want to specify the etcd address: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://127.0.0.1:2379 # # To configure via environment variables, you can use `${{VAR}}` syntax. For instance: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://${{ETCD_HOST}}:2379 # @@ -34,7 +42,11 @@ # Also, If you want to use default value when the environment variable not set, # Use `${{VAR:=default_value}}` instead. For instance: # -# etcd: +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: # host: # - http://${{ETCD_HOST:=localhost}}:2379 # diff --git a/docs/assets/images/control-plane-service-discovery.png b/docs/assets/images/control-plane-service-discovery.png new file mode 100644 index 000000000000..034f81c76803 Binary files /dev/null and b/docs/assets/images/control-plane-service-discovery.png differ diff --git a/docs/assets/images/external-plugin.png b/docs/assets/images/external-plugin.png index a0b3d94c1100..38c3cdebc47a 100644 Binary files a/docs/assets/images/external-plugin.png and b/docs/assets/images/external-plugin.png differ diff --git a/docs/assets/other/json/apisix-grafana-dashboard.json b/docs/assets/other/json/apisix-grafana-dashboard.json index 247d9b3bc152..1ea90c10ca34 100644 --- a/docs/assets/other/json/apisix-grafana-dashboard.json +++ b/docs/assets/other/json/apisix-grafana-dashboard.json @@ -1622,6 +1622,111 @@ "timeShift": null, "title": "Nginx metric errors", "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "The free space percent of each nginx shared DICT since APISIX start", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 57 
+ }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(apisix_shared_dict_free_space_bytes * 100) / on (name) apisix_shared_dict_capacity_bytes", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Nginx shared dict free space percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:117", + "decimals": null, + "format": "percent", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:118", + "decimals": null, + "format": "Misc", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "refresh": "5s", diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md index ebf7f0253df0..169a03ffee46 100644 --- a/docs/en/latest/FAQ.md +++ b/docs/en/latest/FAQ.md @@ -118,7 +118,7 @@ There are two different ways to achieve this in Apache APISIX: 1. 
Using the `vars` field in a [Route](terminology/route.md): ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -131,7 +131,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 } }' -curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "vars": [ @@ -158,7 +158,7 @@ Apache APISIX provides several different ways to achieve this: 1. Setting `http_to_https` to `true` in the [redirect](plugins/redirect.md) Plugin: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -173,7 +173,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 2. Advanced routing with `vars` in the redirect Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "host": "foo.com", @@ -196,7 +196,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 3. 
Using the `serverless` Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": { @@ -267,15 +267,16 @@ To configure Apache APISIX to listen on multiple ports, you can: - 9082 ``` - Similarly for HTTPS requests, modify the parameter `ssl.listen_port` in `conf/config.yaml`: + Similarly for HTTPS requests, modify the parameter `ssl.listen` in `conf/config.yaml`: ``` apisix: ssl: - listen_port: - - 9443 - - 9444 - - 9445 + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 ``` 2. Reload or restart Apache APISIX. @@ -365,7 +366,9 @@ You can follow the steps below to configure this: ```yaml apisix: - port_admin: 9180 # use a separate port + admin_listen: # use a separate port + ip: 127.0.0.1 + port: 9180 ``` 2. Add a proxy Route for the Apache APISIX dashboard: @@ -395,7 +398,7 @@ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f03433 You can use the `vars` field in a Route for matching regular expressions: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/*", "vars": [ @@ -431,7 +434,7 @@ For more info on using `vars` refer to [lua-resty-expr](https://github.com/api7/ Yes. 
The example below shows configuring the FQDN `httpbin.default.svc.cluster.local` (a Kubernetes service): ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/ip", "upstream": { @@ -469,7 +472,7 @@ apisix: Now, to access the Admin API: ```shell -$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' { "uris":[ "/*" ], "name":"admin-token-test", @@ -532,7 +535,7 @@ You can check [this post](https://juejin.cn/post/6965778290619449351) for a more To strip a prefix from a path in your route, like to take `/foo/get` and strip it to `/get`, you can use the [proxy-rewrite](plugins/proxy-rewrite.md) Plugin: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/foo/*", "plugins": { @@ -591,6 +594,36 @@ The differences between the two are described in the table below: | Used when there are property changes that needs to be propagated across all configuration instances of a Plugin. | Used when you need to reuse a common set of configuration instances so that it can be extracted to a `plugin-config` and bound to different Routes. | | Takes effect on all the entities bound to the configuration instances of the Plugin. | Takes effect on Routes bound to the `plugin-config`. | +## After deploying Apache APISIX, how to detect the survival of the APISIX data plane? 
+ +You can create a route named `health-info` and enable the [fault-injection](https://apisix.apache.org/docs/apisix/plugins/fault-injection/) plugin (where YOUR-TOKEN is the user's token; 127.0.0.1 is the IP address of the control plane, which can be modified by yourself): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/health-info \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "fine" + } + } + }, + "uri": "/status" +}' +```` + +Verification: + +Access the `/status` of the Apache APISIX data plane to detect APISIX. If the response code is 200, it means APISIX is alive. + +:::note + +This method only detects whether the APISIX data plane is alive or not. It does not mean that the routing and other functions of APISIX are normal. These require more routing-level detection. + +::: + ## Where can I find more answers? You can find more answers on: diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md index d78eb0d6e512..87191479dacc 100644 --- a/docs/en/latest/admin-api.md +++ b/docs/en/latest/admin-api.md @@ -23,17 +23,103 @@ title: Admin API The Admin API lets users control their deployed Apache APISIX instance. The [architecture design](./architecture-design/apisix.md) gives an idea about how everything fits together. -By default, the Admin API listens to port `9080` (`9443` for HTTPS) when APISIX is launched. This can be changed by modifying your configuration file ([conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml)). +By default, the Admin API listens to port `9180` when APISIX is launched. This can be changed by modifying your configuration file ([conf/config.yaml](https://github.com/apache/apisix/blob/master/conf/config.yaml)). **Note**: Mentions of `X-API-KEY` in this document refers to `apisix.admin_key.key`—the access token for Admin API—in your configuration file. 
+## V3 + +The Admin API has made some breaking changes in V3 version, as well as supporting additional features. + +### Support new response body format + +1. Remove `action` field in response body; +2. Adjust the response body structure when fetching the list of resources, the new response body structure like: + +```json +{ + "count":2, + "list":[ + { + ... + }, + { + ... + } + ] +} +``` + +### Support paging query + +Paging query is supported when getting the resource list, paging parameters include: + +| parameter | Default | Valid range | Description | +| --------- | ------ | ----------- | ---------------------------- | +| page | 1 | [1, ...] | Number of pages | +| page_size | | [10, 500] | Number of resources per page | + +The example is as follows: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes?page=1&page_size=10 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d ' +{ + "count": 1, + "list": [ + { + ... + } + ] +} +``` + +Resources that support paging queries: + +- Consumer +- Global Rules +- Plugin Config +- Proto +- Route +- Service +- SSL +- Stream Route +- Upstream + +### Support filtering query + +When getting a list of resources, it supports filtering resources based on `name`, `label`, `uri`. + +| parameter | parameter | +| --------- | ------------------------------------------------------------ | +| name | Query resource by their `name`, which will not appear in the query results if the resource itself does not have `name`. | +| label | Query resource by their `label`, which will not appear in the query results if the resource itself does not have `label`. | +| uri | Supported on Route resources only. If the `uri` of a Route is equal to the uri of the query or if the `uris` contains the uri of the query, the Route resource appears in the query results. | + +When multiple filter parameters are enabled, use the intersection of the query results for different filter parameters. 
+ +The following example will return a list of routes, and all routes in the list satisfy: the `name` of the route contains the string "test", the `uri` contains the string "foo", and there is no restriction on the `label` of the route, since the label of the query is the empty string. + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label= \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X GET -i -d ' +{ + "count": 1, + "list": [ + { + ... + } + ] +} +``` + ## Route **API**: /apisix/admin/routes/{id}?ttl=0 [Routes](./terminology/route.md) match the client's request based on defined rules, loads and executes the corresponding [plugins](#plugin), and forwards the request to the specified [Upstream](#upstream). -**Note**: When the Admin API is enabled, to avoid conflicts with your design API, use a different port for the Admin API. This can be set in your configuration file by changing the `port_admin` key. +**Note**: When the Admin API is enabled, to avoid conflicts with your design API, use a different port for the Admin API. This can be set in your configuration file by changing the `admin_listen` key. ### Request Methods @@ -103,7 +189,7 @@ Example configuration: "send": 3, "read": 3 }, - "filter_func": "", # User-defined filtering function + "filter_func": "" # User-defined filtering function } ``` @@ -111,7 +197,7 @@ Example API usage: ```shell # Create a route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "hosts": ["foo.com", "*.bar.com"], @@ -131,7 +217,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT ... 
# Create a route expires after 60 seconds, then it's deleted automatically -$ curl http://127.0.0.1:9080/apisix/admin/routes/2?ttl=60 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2?ttl=60 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/aa/index.html", "upstream": { @@ -148,7 +234,7 @@ Date: Sat, 31 Aug 2019 01:17:15 GMT # Add an upstream node to the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -167,7 +253,7 @@ After successful execution, upstream nodes will be updated to: # Update the weight of an upstream node to the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -186,7 +272,7 @@ After successful execution, upstream nodes will be updated to: # Delete an upstream node for the Route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -204,7 +290,7 @@ After successful execution, upstream nodes will be updated to: # Replace methods of the Route -- array -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d '{ "methods": ["GET", "POST"] }' HTTP/1.1 200 OK @@ -215,7 +301,7 @@ After successful execution, methods will not retain the original data, and the 
e # Replace upstream nodes of the Route -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -229,7 +315,7 @@ After successful execution, nodes will not retain the original data, and the ent # Replace methods of the Route -- sub path -$ curl http://127.0.0.1:9080/apisix/admin/routes/1/methods -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d'["POST", "DELETE", " PATCH"]' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1/methods -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d'["POST", "DELETE", " PATCH"]' HTTP/1.1 200 OK ... @@ -238,7 +324,7 @@ After successful execution, methods will not retain the original data, and the e # disable route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 0 }' @@ -252,7 +338,7 @@ After successful execution, status nodes will be updated to: # enable route -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "status": 1 }' @@ -326,7 +412,7 @@ Example configuration: Example API usage: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "plugins": { "limit-count": { @@ -350,7 +436,7 @@ HTTP/1.1 201 Created # Add an upstream node to the Service -$ curl 
http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -369,7 +455,7 @@ After successful execution, upstream nodes will be updated to: # Update the weight of an upstream node to the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -388,7 +474,7 @@ After successful execution, upstream nodes will be updated to: # Delete an upstream node for the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "upstream": { "nodes": { @@ -406,7 +492,7 @@ After successful execution, upstream nodes will be updated to: # Replace upstream nodes of the Service -$ curl http://127.0.0.1:9080/apisix/admin/services/201/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/201/upstream/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -458,7 +544,7 @@ Example Configuration: { "plugins": {}, # Bound plugin "username": "name", # Consumer name - "desc": "hello world", # Consumer desc + "desc": "hello world" # Consumer desc } ``` @@ -467,7 +553,7 @@ When bound to a Route or Service, the Authentication Plugin infers the Consumer Example API usage: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "username": "jack", "plugins": { @@ -486,7 +572,7 @@ HTTP/1.1 200 OK Date: Thu, 26 Dec 2019 08:17:49 GMT ... -{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63},"action":"set"} +{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63}} ``` Since `v2.2`, we can bind multiple authentication plugins to the same consumer. 
@@ -524,7 +610,7 @@ In addition to the equalization algorithm selections, Upstream also supports pas | Name | Optional | Description | Example | | --------------------------- | ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | | type | required | Load balancing algorithm to be used. | | -| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80` | +| nodes | required, can't be used with `service_name` | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. 
For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority`. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` | | service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). | `a-bootiful-client` | | discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` | | hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | @@ -565,7 +651,7 @@ The following should be considered when setting the `hash_on` value: - When set to `vars_combinations`, the `key` is required. The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`. - When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`. -The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment?): +The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment): You can set the `scheme` to `tls`, which means "TLS over TCP". @@ -584,7 +670,7 @@ Example Configuration: "timeout": { # Set the timeout for connecting, sending and receiving messages, each is 15 seconds. 
"connect":15, "send":15, - "read":15, + "read":15 }, "nodes": {"host:80": 100}, # Upstream machine address list, the format is `Address + Port` # is the same as "nodes": [ {"host": "host", "port": 80, "weight": 100} ], @@ -594,7 +680,7 @@ Example Configuration: "key": "", "name": "upstream-for-test", "desc": "hello world", - "scheme": "http", # The scheme used when communicating with upstream, the default is `http` + "scheme": "http" # The scheme used when communicating with upstream, the default is `http` } ``` @@ -604,7 +690,7 @@ Example 1: Create an Upstream and modify the data in `nodes` ```shell # Create upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "type":"roundrobin", "nodes":{ @@ -616,7 +702,7 @@ HTTP/1.1 201 Created # Add a node to the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 1 @@ -633,7 +719,7 @@ After successful execution, nodes will be updated to: # Update the weight of a node to the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "nodes": { "127.0.0.1:1981": 10 @@ -650,7 +736,7 @@ After successful execution, nodes will be updated to: # Delete a node for the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100 -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { 
"nodes": { "127.0.0.1:1980": null @@ -666,7 +752,7 @@ After successful execution, nodes will be updated to: # Replace the nodes of the Upstream -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/100/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/100/nodes -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' { "127.0.0.1:1982": 1 }' @@ -685,7 +771,7 @@ Example 2: Proxy client request to `https` Upstream service 1. Create a route and configure the upstream scheme as `https`. ```shell -$ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get", "upstream": { @@ -766,17 +852,17 @@ Currently, the response is returned from etcd. ## SSL -**API**:/apisix/admin/ssl/{id} +**API**:/apisix/admin/ssls/{id} ### Request Methods | Method | Request URI | Request Body | Description | | ------ | ---------------------- | ------------ | ----------------------------------------------- | -| GET | /apisix/admin/ssl | NULL | Fetches a list of all configured SSL resources. | -| GET | /apisix/admin/ssl/{id} | NULL | Fetch specified resource by id. | -| PUT | /apisix/admin/ssl/{id} | {...} | Creates a resource with the specified id. | -| POST | /apisix/admin/ssl | {...} | Creates a resource and assigns a random id. | -| DELETE | /apisix/admin/ssl/{id} | NULL | Removes the resource with the specified id. | +| GET | /apisix/admin/ssls | NULL | Fetches a list of all configured SSL resources. | +| GET | /apisix/admin/ssls/{id} | NULL | Fetch specified resource by id. | +| PUT | /apisix/admin/ssls/{id} | {...} | Creates a resource with the specified id. | +| POST | /apisix/admin/ssls | {...} | Creates a resource and assigns a random id. 
| +| DELETE | /apisix/admin/ssls/{id} | NULL | Removes the resource with the specified id. | ### Request Body Parameters @@ -881,7 +967,7 @@ A JSON object defined according to the `metadata_schema` of the Plugin ({plugin_ Example Configuration: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/example-plugin -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "skey": "val", "ikey": 1 @@ -911,11 +997,11 @@ The Plugin ({plugin_name}) of the data structure. Example API usage: ```shell -$ curl "http://127.0.0.1:9080/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/list" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ["zipkin","request-id",...] -$ curl "http://127.0.0.1:9080/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -{"properties":{"disable":{"type":"boolean"}},"additionalProperties":false,"type":"object"} +$ curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +{"$comment":"this is a mark for our injected plugin schema","properties":{"header":{"default":"apikey","type":"string"},"hide_credentials":{"default":false,"type":"boolean"},"_meta":{"properties":{"filter":{"type":"array","description":"filter determines whether the plugin needs to be executed at runtime"},"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"type":"integer","description":"priority of plugins by customized order"}},"type":"object"},"query":{"default":"apikey","type":"string"}},"type":"object"} ``` **API**: /apisix/admin/plugins?all=true diff --git a/docs/en/latest/architecture-design/apisix.md b/docs/en/latest/architecture-design/apisix.md index 8bef62289192..2e76aab68941 100644 --- 
a/docs/en/latest/architecture-design/apisix.md +++ b/docs/en/latest/architecture-design/apisix.md @@ -1,7 +1,11 @@ --- -title: APISIX +title: Architecture +keywords: + - API gateway + - Apache APISIX + - APISIX architecture +description: Architecture of Apache APISIX—the Cloud Native API Gateway. --- - -## Apache APISIX : Software Architecture - -![flow-software-architecture](../../../assets/images/flow-software-architecture.png) - -## Plugin Loading Process - -![flow-load-plugin](../../../assets/images/flow-load-plugin.png) - -## Plugin Hierarchy Structure - -![flow-plugin-internal](../../../assets/images/flow-plugin-internal.png) - -## Configuring APISIX +APISIX is built on top of Nginx and [ngx_lua](https://github.com/openresty/lua-nginx-module) leveraging the power offered by LuaJIT. See [Why Apache APISIX chose Nginx and Lua to build API Gateway?](https://apisix.apache.org/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/). -Apache APISIX can be configured in two ways: +![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png) -1. By directly changing `conf/config.yaml`. -2. Using the `--config` or the `-c` flag to pass in the file path of your config file while starting APISIX (`apisix start -c `). +APISIX has two main parts: -Configurations can be added to this YAML file and Apache APISIX will fall back to the default configurations for anything that is not configured in this file. +1. APISIX core, Lua plugin, multi-language Plugin runtime, and the WASM plugin runtime. +2. Built-in Plugins that adds features for observability, security, traffic control, etc. -For example, to set the default listening port to 8000 while keeping other configurations as default, your configuration file (`config.yaml`) would look like: +The APISIX core handles the important functions like matching Routes, load balancing, service discovery, configuration management, and provides a management API. 
It also includes APISIX Plugin runtime supporting Lua and multilingual Plugins (Go, Java , Python, JavaScript, etc) including the experimental WASM Plugin runtime. -```yaml -apisix: - node_listen: 8000 # APISIX listening port -``` +APISIX also has a set of [built-in Plugins](https://apisix.apache.org/docs/apisix/plugins/batch-requests) that adds features like authentication, security, observability, etc. They are written in Lua. -Similarly, to set the listening port to 8000 and set the etcd address to `http://foo:2379` while keeping other configurations as default, your configuration file would look like: +## Request handling process -```yaml -apisix: - node_listen: 8000 # APISIX listening port +The diagram below shows how APISIX handles an incoming request and applies corresponding Plugins: -etcd: - host: "http://foo:2379" # etcd address -``` +![flow-load-plugin](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-load-plugin.png) -Default configurations of APISIX can be found in the `conf/config-default.yaml` file. +## Plugin hierarchy -**Note**: This file is bound to the APISIX source code and should **NOT** be modified. The configuration should only be changed by the methods mentioned above. +The chart below shows the order in which different types of Plugin are applied to a request: -**Note**: The `conf/nginx.conf` file is automatically generated by APISIX and should **NOT** be edited. +![flow-plugin-internal](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-plugin-internal.png) diff --git a/docs/en/latest/architecture-design/debug-mode.md b/docs/en/latest/architecture-design/debug-mode.md deleted file mode 100644 index 479bdec69bac..000000000000 --- a/docs/en/latest/architecture-design/debug-mode.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Debug Mode ---- - - - -### Basic Debug Mode - -You can enable the basic debug mode by adding this line to your `conf/debug.yaml` file. 
- -``` -basic: - enable: true -``` - -**Note**: Before Apache APISIX 2.10, basic debug mode was enabled by setting `apisix.enable_debug = true` in the `conf/config.yaml` file. - -For example, if we are using two plugins `limit-conn` and `limit-count` for a Route `/hello`, we will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when we enable the basic debug mode. - -```shell -$ curl http://127.0.0.1:1984/hello -i -HTTP/1.1 200 OK -Content-Type: text/plain -Transfer-Encoding: chunked -Connection: keep-alive -Apisix-Plugins: limit-conn, limit-count -X-RateLimit-Limit: 2 -X-RateLimit-Remaining: 1 -Server: openresty - -hello world -``` - -If the debug information cannot be included in a response header (say when the plugin is in a stream subsystem), the information will be logged in the error log at a `warn` level. - -### Advanced Debug Mode - -Advanced debug mode can also be enabled by modifying the configuration in the `conf/debug.yaml` file. - -Enable advanced debug mode by modifying the configuration in `conf/debug.yaml` file. - -The checker checks every second for changes to the configuration files. An `#END` flag is added to let the checker know that it should only look for changes till that point. - -The checker would only check this if the file was updated by checking its last modification time. - -| Key | Optional | Description | Default | -| ------------------------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| hook_conf.enable | required | Enable/Disable hook debug trace. Target module function's input arguments or returned value would be printed once this option is enabled. | false | -| hook_conf.name | required | The module list name of the hook which has enabled debug trace. | | -| hook_conf.log_level | required | Logging levels for input arguments & returned values. 
| warn | -| hook_conf.is_print_input_args | required | Enable/Disable printing input arguments. | true | -| hook_conf.is_print_return_value | required | Enable/Disable printing returned values. | true | - -Example: - -```yaml -hook_conf: - enable: false # Enable/Disable Hook Debug Trace - name: hook_phase # The Module List Name of Hook which has enabled Debug Trace - log_level: warn # Logging Levels - is_print_input_args: true # Enable/Disable Input Arguments Print - is_print_return_value: true # Enable/Disable Returned Value Print - -hook_phase: # Module Function List, Name: hook_phase - apisix: # Referenced Module Name - - http_access_phase # Function Names:Array - - http_header_filter_phase - - http_body_filter_phase - - http_log_phase -#END -``` - -### Enable Advanced Debug Mode Dynamically - -You can also enable the advanced debug mode to take effect on particular requests. - -For example, to dynamically enable advanced debugging mode on requests with a particular header name `X-APISIX-Dynamic-Debug` you can configure: - -```yaml -http_filter: - enable: true # Enable/Disable Advanced Debug Mode Dynamically - enable_header_name: X-APISIX-Dynamic-Debug # Trace for the request with this header -...... -#END -``` - -This will enable the advanced debug mode for requests like: - -```shell -curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' -``` - -**Note**: The `apisix.http_access_phase` module cannot be hooked for dynamic rules as the advanced debug mode is enabled based on the request. diff --git a/docs/en/latest/architecture-design/deployment-role.md b/docs/en/latest/architecture-design/deployment-role.md deleted file mode 100644 index 5e750e7f17dd..000000000000 --- a/docs/en/latest/architecture-design/deployment-role.md +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: Deployment Role ---- - - - -## Concept - -Previously, the DP (Data Plane) and the CP (Control Plane) are not separate explicitly. 
- -Although we clearly distinguish the different responsibilities of DP and CP in the documentation, not everyone has correctly deployed APISIX in the production environment. - -Therefore, we introduce new concepts called deployment modes/roles, to help users deploy APISIX easily and safely. - -APISIX under different deployment modes will act differently. - -The table below shows the relationship among deployment modes and roles: - -| Deployment Modes | Role | Description | -|------------------|----------------------------|------------------------------------------------------------------------------------------| -| traditional | traditional | DP + CP are deployed together by default. People need to disable `enable_admin` manually | -| decoupled | data_plane / control_plane | DP and CP are deployed independently. | -| standalone | data_plane | Only DP, load the all configurations from local yaml file | - -## Deployment Modes - -### Traditional - -![traditional](../../../assets/images/deployment-traditional.png) - -In the traditional deployment mode, one instance can be both DP & CP. - -There will be a `conf server` listens on UNIX socket and acts as a proxy between APISIX and etcd. - -Both the DP part and CP part of the instance will connect to the `conf server` via HTTP protocol. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://xxxx - prefix: /apisix - timeout: 30 -``` - -### Decoupled - -![decoupled](../../../assets/images/deployment-cp_and_dp.png) - -The instance deployed as data_plane will: - -1. Fetch configurations from the CP, the default port is 9280 -2. Before the DP service starts, it will perform a health check on all CP addresses - - If all CP addresses are unavailable, the startup fails and an exception message is output to the screen. 
- - If at least one CP address is available, print the unhealthy CP check result log, and then start the APISIX service. - - If all CP addresses are normal, start the APISIX service normally. -3. Handle user requests. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: data_plane - role_data_plane: - config_provider: control_plane - control_plane: - host: - - xxxx:9280 - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert -``` - -The instance deployed as control_plane will: - -1. Listen on 9180 by default, and provide Admin API for Admin user -2. Provide `conf server` which listens on port 9280 by default. Both the DP instances and this CP instance will connect to the `conf server` via HTTPS enforced by mTLS. - -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: control_plane - role_control_plan: - config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - client_ca_cert: /path/to/ca-cert - etcd: - host: - - https://xxxx - prefix: /apisix - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert -``` - -### Standalone - -In this mode, APISIX is deployed as DP and reads configurations from yaml file in the local file system. 
- -Here is the example of configuration: - -```yaml title="conf/config.yaml" -deployment: - role: data_plane - role_data_plane: - config_provider: yaml -``` diff --git a/docs/en/latest/batch-processor.md b/docs/en/latest/batch-processor.md index a790dbcd2139..0e7020930447 100644 --- a/docs/en/latest/batch-processor.md +++ b/docs/en/latest/batch-processor.md @@ -83,7 +83,7 @@ The batch processor's configuration will be set inside the plugin's configuratio For example: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "http-logger": { diff --git a/docs/en/latest/benchmark.md b/docs/en/latest/benchmark.md index dc25c781dac4..b8482603560c 100644 --- a/docs/en/latest/benchmark.md +++ b/docs/en/latest/benchmark.md @@ -53,7 +53,7 @@ The result of Flame Graph: And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", @@ -98,7 +98,7 @@ The result of Flame Graph: And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/hello", diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md new file mode 100644 index 000000000000..64574cb987a0 --- /dev/null +++ b/docs/en/latest/building-apisix.md @@ -0,0 +1,282 @@ +--- +id: building-apisix +title: Building APISIX from source +keywords: + - API gateway + - Apache APISIX + - Code Contribution + - Building APISIX +description: Guide for building and running APISIX locally for development. +--- + + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If you are looking to contribute to APISIX or setup a development environment, this guide is for you. + +If you are looking to install and run APISIX, check out the [Installation](./installation-guide.md) docs. + +:::note + +If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools). + +::: + +## Building APISIX from source + +To start, you have to install some dependencies. APISIX provides a handy script to get these installed: + +```shell +curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash - +``` + +Then, create a directory and set the environment variable `APISIX_VERSION`: + +```shell +APISIX_VERSION='2.15.0' +mkdir apisix-${APISIX_VERSION} +``` + +You can now download the APISIX source code by running the command below: + +```shell +wget https://downloads.apache.org/apisix/${APISIX_VERSION}/apache-apisix-${APISIX_VERSION}-src.tgz +``` + +You can also download the source package from the [Downloads page](https://apisix.apache.org/downloads/). You will also find source packages for APISIX Dashboard and APISIX Ingress Controller. 
+ +After you have downloaded the package, you can extract the files to the folder created previously: + +```shell +tar zxvf apache-apisix-${APISIX_VERSION}-src.tgz -C apisix-${APISIX_VERSION} +``` + +Now, navigate to the directory, create dependencies, and install APISIX as shown below: + +```shell +cd apisix-${APISIX_VERSION} +make deps +make install +``` + +This will install the runtime dependent Lua libraries and the `apisix` command. + +:::note + +If you get an error message like `Could not find header file for LDAP/PCRE/openssl` while running `make deps`, use this solution. + +`luarocks` supports custom compile-time dependencies (See: [Config file format](https://github.com/luarocks/luarocks/wiki/Config-file-format)). You can use a third-party tool to install the missing packages and add its installation directory to the `luarocks`' variables table. This method works on macOS, Ubuntu, CentOS, and other similar operating systems. + +The solution below is for macOS but it works similarly for other operating systems: + +1. Install `openldap` by running: + + ```shell + brew install openldap + ``` + +2. Locate the installation directory by running: + + ```shell + brew --prefix openldap + ``` + +3. Add this path to the project configuration file by any of the two methods shown below: + 1. You can use the `luarocks config` command to set `LDAP_DIR`: + + ```shell + luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1 + ``` + + 2. You can also change the default configuration file of `luarocks`. Open the file `~/.luaorcks/config-5.1.lua` and add the following: + + ```shell + variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", } + ``` + + `/opt/homebrew/cellar/openldap/` is default path `openldap` is installed on Apple Silicon macOS machines. For Intel machines, the default path is `/usr/local/opt/openldap/`. 
+ +::: + +To uninstall the APISIX runtime, run: + +```shell +make uninstall +make undeps +``` + +:::danger + +This operation will remove the files completely. + +::: + +## Installing etcd + +APISIX uses [etcd](https://github.com/etcd-io/etcd) to save and synchronize configuration. Before running APISIX, you need to install etcd on your machine. Installation methods based on your operating system are mentioned below. + + + + +```shell +ETCD_VERSION='3.4.18' +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz +tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ + cd etcd-v${ETCD_VERSION}-linux-amd64 && \ + sudo cp -a etcd etcdctl /usr/bin/ +nohup etcd >/tmp/etcd.log 2>&1 & +``` + + + + + +```shell +brew install etcd +brew services start etcd +``` + + + + +## Running and managing APISIX server + +To initialize the configuration file, within the APISIX directory, run: + +```shell +apisix init +``` + +:::tip + +You can run `apisix help` to see a list of available commands. + +::: + +You can then test the created configuration file by running: + +```shell +apisix test +``` + +Finally, you can run the command below to start APISIX: + +```shell +apisix start +``` + +To stop APISIX, you can use either the `quit` or the `stop` subcommand. + +`apisix quit` will gracefully shutdown APISIX. It will ensure that all received requests are completed before stopping. + +```shell +apisix quit +``` + +Where as, the `apisix stop` command does a force shutdown and discards all pending requests. + +```shell +apisix stop +``` + +## Building runtime for APISIX + +Some features of APISIX requires additional Nginx modules to be introduced into OpenResty. + +To use these features, you need to build a custom distribution of OpenResty (apisix-base). See [apisix-build-tools](https://github.com/api7/apisix-build-tools) for setting up your build environment and building it. 
+ +## Running tests + +The steps below show how to run the test cases for APISIX: + +1. Install [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION), the package manager for Perl. +2. Install the [test-nginx](https://github.com/openresty/test-nginx) dependencies with `cpanm`: + + ```shell + sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + ``` + +3. Clone the test-nginx source code locally: + + ```shell + git clone https://github.com/openresty/test-nginx.git + ``` + +4. Append the current directory to Perl's module directory by running: + + ```shell + export PERL5LIB=.:$PERL5LIB + ``` + + You can specify the Nginx binary path by running: + + ```shell + TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t + ``` + +5. Run the tests by running: + + ```shell + make test + ``` + +:::note + +Some tests rely on external services and system configuration modification. See [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh) for a complete test environment build. + +::: + +### Troubleshooting + +These are some common troubleshooting steps for running APISIX test cases. + +#### Configuring Nginx path + +For the error `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf`, ensure that OpenResty is set to the default Nginx and export the path as follows: + +- Linux default installation path: + + ```shell + export PATH=/usr/local/openresty/nginx/sbin:$PATH + ``` + +- macOS default installation path (view homebrew): + + ```shell + export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH + ``` + +#### Running a specific test case + +To run a specific test case, use the command below: + +```shell +prove -Itest-nginx/lib -r t/plugin/openid-connect.t +``` + +See [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md) for more details. 
diff --git a/docs/en/latest/certificate.md b/docs/en/latest/certificate.md index 5507e5ee3ba8..94e74d722d11 100644 --- a/docs/en/latest/certificate.md +++ b/docs/en/latest/certificate.md @@ -50,7 +50,7 @@ with open(sys.argv[2]) as f: key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], @@ -66,7 +66,7 @@ print(resp.text) ./ssl.py t.crt t.key test.com # create Router object -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["test.com"], @@ -111,7 +111,7 @@ Here is an example, note that the value we pass as `sni` is `*.test.com`. ```shell ./ssl.py t.crt t.key '*.test.com' -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["*.test.com"], @@ -171,3 +171,152 @@ private keys by `certs` and `keys`. `APISIX` will pair certificate and private key with the same indice as a SSL key pair. So the length of `certs` and `keys` must be same. + +### set up multiple CA certificates + +APISIX currently uses CA certificates in several places, such as [Protect Admin API](./mtls.md#protect-admin-api), [etcd with mTLS](./mtls.md#etcd-with-mtls), and [Deployment Modes](./architecture-design/deployment-role.md). 
+ +In these places, `ssl_trusted_certificate` or `trusted_ca_cert` will be used to set up the CA certificate, but these configurations will eventually be translated into [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) directive in OpenResty. + +If you need to set up different CA certificates in different places, then you can package these CA certificates into a CA bundle file and point to this file when you need to set up CAs. This will avoid the problem that the generated `lua_ssl_trusted_certificate` has multiple locations and overwrites each other. + +The following is a complete example to show how to set up multiple CA certificates in APISIX. + +Suppose we let client and APISIX Admin API, APISIX and ETCD communicate with each other using mTLS protocol, and currently there are two CA certificates, `foo_ca.crt` and `bar_ca.crt`, and use each of these two CA certificates to issue client and server certificate pairs, `foo_ca.crt` and its issued certificate pair are used to protect Admin API, and `bar_ca.crt` and its issued certificate pair are used to protect ETCD. + +The following table details the configurations involved in this example and what they do: + +| Configuration | Type | Description | +| ------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA cert | Issues the secondary certificate required for the client to communicate with the APISIX Admin API over mTLS. | +| foo_client.crt | cert | A certificate issued by `foo_ca.crt` and used by the client to prove its identity when accessing the APISIX Admin API. | +| foo_client.key | key | Issued by `foo_ca.crt`, used by the client, the key file required to access the APISIX Admin API. 
|
+| foo_server.crt | cert | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `apisix.admin_api_mtls.admin_ssl_cert` configuration entry. |
+| foo_server.key | key | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `apisix.admin_api_mtls.admin_ssl_cert_key` configuration entry. |
+| admin.apisix.dev | domain name | Common Name used in issuing `foo_server.crt` certificate, through which the client accesses APISIX Admin API |
+| bar_ca.crt | CA cert | Issues the secondary certificate required for APISIX to communicate with ETCD over mTLS. |
+| bar_etcd.crt | cert | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `--cert-file` option in the ETCD startup command. |
+| bar_etcd.key | key | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `--key-file` option in the ETCD startup command. |
+| bar_apisix.crt | cert | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.cert` configuration entry. |
+| bar_apisix.key | key | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.key` configuration entry. |
+| etcd.cluster.dev | domain name | Common Name used in issuing `bar_etcd.crt` certificate, which is used as SNI when APISIX communicates with ETCD over mTLS. Corresponds to the `etcd.tls.sni` configuration item. |
+| apisix.ca-bundle | CA bundle | Merged from `foo_ca.crt` and `bar_ca.crt`, replacing `foo_ca.crt` and `bar_ca.crt`. |
+
+1. Create CA bundle files
+
+```
+cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle
+```
+
+2. 
Start the ETCD cluster and enable client authentication
+
+Start by writing a `goreman` configuration named `Procfile-single-enable-mtls`, with the following content:
+
+```text
+# Use goreman to run `go get github.com/mattn/goreman`
+etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle
+etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle
+etcd3: etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle
+```
+
+Use `goreman` to start the ETCD cluster:
+
+```shell
+goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 &
+```
+
+3. 
Update `config.yaml` + +```yaml +apisix: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin + admin_listen: + ip: 127.0.0.1 + port: 9180 + https_admin: true + + admin_api_mtls: + admin_ssl_ca_cert: /path/to/apisix.ca-bundle + admin_ssl_cert: /path/to/foo_server.crt + admin_ssl_cert_key: /path/to/foo_server.key + + ssl: + ssl_trusted_certificate: /path/to/apisix.ca-bundle + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + - "https://127.0.0.1:22379" + - "https://127.0.0.1:32379" + tls: + cert: /path/to/bar_apisix.crt + key: /path/to/bar_apisix.key + sni: etcd.cluster.dev +``` + +4. Test APISIX Admin API + +Start APISIX, if APISIX starts successfully and there is no abnormal output in `logs/error.log`, it means that mTLS communication between APISIX and ETCD is normal. + +Use curl to simulate a client, communicate with APISIX Admin API with mTLS, and create a route: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +A successful mTLS communication between curl and the APISIX Admin API is indicated if the following SSL handshake process is output: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, 
Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. Verify APISIX proxy + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +... +``` + +APISIX proxied the request to the `/get` path of the upstream `httpbin.org` and returned `HTTP/1.1 200 OK`. The whole process is working fine using CA bundle instead of CA certificate. diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json index 517fc0f1a37f..fcb04951f9f4 100644 --- a/docs/en/latest/config.json +++ b/docs/en/latest/config.json @@ -1,15 +1,17 @@ { - "version": "2.14.1", + "version": "2.15.0", "sidebar": [ { - "type": "category", - "label": "Architecture Design", - "items": [ - "architecture-design/apisix", - "architecture-design/plugin-config", - "architecture-design/debug-mode", - "architecture-design/deployment-role" - ] + "type": "doc", + "id": "getting-started" + }, + { + "type": "doc", + "id": "installation-guide" + }, + { + "type": "doc", + "id": "architecture-design/apisix" }, { "type": "category", @@ -19,6 +21,7 @@ "terminology/consumer", "terminology/global-rule", "terminology/plugin", + "terminology/plugin-config", "terminology/route", "terminology/router", "terminology/script", @@ -26,14 +29,6 @@ "terminology/upstream" ] }, - { - "type": "doc", - "id": "getting-started" - }, - { - "type": "doc", - "id": "installation-guide" - }, { "type": "category", "label": "Plugins", @@ -49,7 +44,8 @@ "plugins/real-ip", "plugins/server-info", "plugins/ext-plugin-pre-req", - "plugins/ext-plugin-post-req" + "plugins/ext-plugin-post-req", + "plugins/ext-plugin-post-resp" ] }, { @@ -153,7 +149,8 @@ "plugins/google-cloud-logging", 
"plugins/splunk-hec-logging", "plugins/file-logger", - "plugins/loggly" + "plugins/loggly", + "plugins/elasticsearch-logger" ] } ] @@ -165,12 +162,14 @@ "plugins/serverless", "plugins/azure-functions", "plugins/openwhisk", - "plugins/aws-lambda" + "plugins/aws-lambda", + "plugins/workflow", + "plugins/openfunction" ] }, { "type": "category", - "label": "Other Protocols", + "label": "Other protocols", "items": [ "plugins/dubbo-proxy", "plugins/mqtt-proxy", @@ -193,6 +192,20 @@ } ] }, + { + "type": "category", + "label": "Development", + "items": [ + { + "type": "doc", + "id": "building-apisix" + }, + { + "type": "doc", + "id": "debug-mode" + } + ] + }, { "type": "doc", "id": "FAQ" @@ -210,7 +223,7 @@ "discovery/consul_kv", "discovery/nacos", "discovery/eureka", - "discovery/zookeeper", + "discovery/control-plane-service-discovery", "discovery/kubernetes" ] }, @@ -230,6 +243,10 @@ "xrpc" ] }, + { + "type": "doc", + "id": "deployment-modes" + }, { "type": "doc", "id": "health-check" diff --git a/docs/en/latest/control-api.md b/docs/en/latest/control-api.md index ff7afc28f6ff..7dd55e24fd78 100644 --- a/docs/en/latest/control-api.md +++ b/docs/en/latest/control-api.md @@ -214,7 +214,7 @@ Triggers a full garbage collection in the HTTP subsystem. ### GET /v1/routes -Introduced in [v3.0](https://github.com/apache/apisix/releases/tag/3.0). +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). Returns all configured [Routes](./terminology/route.md): @@ -254,7 +254,7 @@ Returns all configured [Routes](./terminology/route.md): ### GET /v1/route/{route_id} -Introduced in [v3.0](https://github.com/apache/apisix/releases/tag/3.0). +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). Returns the Route with the specified `route_id`: @@ -292,7 +292,7 @@ Returns the Route with the specified `route_id`: ### GET /v1/services -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). 
+Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Returns all the Services: @@ -340,7 +340,7 @@ Returns all the Services: ### GET /v1/service/{service_id} -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Returns the Service with the specified `service_id`: @@ -374,7 +374,7 @@ Returns the Service with the specified `service_id`: ### GET /v1/upstreams -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Dumps all Upstreams: @@ -415,7 +415,7 @@ Dumps all Upstreams: ### GET /v1/upstream/{upstream_id} -Introduced in [v2.11](https://github.com/apache/apisix/releases/tag/2.11). +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). Dumps the Upstream with the specified `upstream_id`: @@ -451,3 +451,40 @@ Dumps the Upstream with the specified `upstream_id`: "modifiedIndex":1225 } ``` + +### GET /v1/plugin_metadatas + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). + +Dumps all plugin_metadatas: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). 
+ +Dumps the metadata with the specified `plugin_name`: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` diff --git a/docs/en/latest/debug-function.md b/docs/en/latest/debug-function.md index 1eecfbd1076e..9b9883fb2be5 100644 --- a/docs/en/latest/debug-function.md +++ b/docs/en/latest/debug-function.md @@ -34,7 +34,7 @@ In the response header of the request, through the response header of `X-APISIX- >Example 1: `502` response status code comes from `Upstream` (IP address is not available) ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "upstream": { @@ -75,7 +75,7 @@ It has a response header of `X-APISIX-Upstream-Status: 502`. >Example 2: `502` response status code comes from `APISIX` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -109,7 +109,7 @@ There is no response header for `X-APISIX-Upstream-Status`. 
>Example 3: `Upstream` has multiple nodes, and all nodes are unavailable ```shell -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "nodes": { "127.0.0.3:1": 1, @@ -122,7 +122,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034 ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream_id": "1" diff --git a/docs/en/latest/debug-mode.md b/docs/en/latest/debug-mode.md new file mode 100644 index 000000000000..e1438d0c3438 --- /dev/null +++ b/docs/en/latest/debug-mode.md @@ -0,0 +1,137 @@ +--- +id: debug-mode +title: Debug mode +keywords: + - API gateway + - Apache APISIX + - Debug mode +description: Guide for enabling debug mode in Apache APISIX. +--- + + + +You can use APISIX's debug mode to troubleshoot your configuration. + +## Basic debug mode + +You can enable the basic debug mode by adding this line to your debug configuration file (`conf/debug.yaml`): + +```yaml title="conf/debug.yaml" +basic: + enable: true +``` + +:::note + +For APISIX releases prior to v2.10, basic debug mode is enabled by setting `apisix.enable_debug = true` in your configuration file (`conf/config.yaml`). + +::: + +If you have configured two Plgins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode. 
+ +```shell +curl http://127.0.0.1:1984/hello -i +``` + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Apisix-Plugins: limit-conn, limit-count +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +Server: openresty + +hello world +``` + +:::info IMPORTANT + +If the debug information cannot be included in a response header (for example, when the Plugin is in a stream subsystem), the debug information will be logged as an error log at a `warn` level. + +::: + +## Advanced debug mode + +You can configure advanced options in debug mode by modifying your debug configuration file (`conf/debug.yaml`). + +The following configurations are available: + +| Key | Required | Default | Description | +|---------------------------------|----------|---------|-----------------------------------------------------------------------------------------------------------------------| +| hook_conf.enable | True | false | Enables/disables hook debug trace. i.e. if enabled, will print the target module function's inputs or returned value. | +| hook_conf.name | True | | Module list name of the hook that enabled the debug trace. | +| hook_conf.log_level | True | warn | Log level for input arguments & returned values. | +| hook_conf.is_print_input_args | True | true | When set to `true` enables printing input arguments. | +| hook_conf.is_print_return_value | True | true | When set to `true` enables printing returned values. | + +:::note + +A checker would check every second for changes to the configuration file. It will only check a file if the file was updated based on its last modification time. + +You can add an `#END` flag to indicate to the checker to only look for changes until that point. 
+ +::: + +The example below shows how you can configure advanced options in debug mode: + +```yaml title="conf/debug.yaml" +hook_conf: + enable: false # Enables/disables hook debug trace + name: hook_phase # Module list name of the hook that enabled the debug trace + log_level: warn # Log level for input arguments & returned values + is_print_input_args: true # When set to `true` enables printing input arguments + is_print_return_value: true # When set to `true` enables printing returned values + +hook_phase: # Module function list, Name: hook_phase + apisix: # Referenced module name + - http_access_phase # Function names:Array + - http_header_filter_phase + - http_body_filter_phase + - http_log_phase +#END +``` + +### Dynamically enable advanced debug mode + +You can also enable advanced debug mode only on particular requests. + +The example below shows how you can enable it on requests with the header `X-APISIX-Dynamic-Debug`: + +```yaml title="conf/debug.yaml" +http_filter: + enable: true # Enable/disable advanced debug mode dynamically + enable_header_name: X-APISIX-Dynamic-Debug # Trace for the request with this header +... +#END +``` + +This will enable the advanced debug mode only for requests like: + +```shell +curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' +``` + +:::note + +The `apisix.http_access_phase` module cannot be hooked for this dynamic rule as the advanced debug mode is enabled based on the request. + +::: diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md new file mode 100644 index 000000000000..d4180f419044 --- /dev/null +++ b/docs/en/latest/deployment-modes.md @@ -0,0 +1,167 @@ +--- +title: Deployment modes +keywords: + - API gateway + - Apache APISIX + - APISIX deployment modes +description: Documentation about the three deployment modes of Apache APISIX. +--- + + +APISIX has three different deployment modes for different production use cases. 
The table below summarises the deployment modes: + +| Deployment mode | Roles | Description | +|-----------------|----------------------------|-----------------------------------------------------------------------------------------------------------| +| traditional | traditional | Data plane and control plane are deployed together. `enable_admin` attribute should be disabled manually. | +| decoupled | data_plane / control_plane | Data plane and control plane are deployed independently. | +| standalone | data_plane | Only data plane is deployed and the configurations are loaded from a local YAML file. | + +Each of these deployment modes are explained in detail below. + +## Traditional + +In the traditional deployment mode, one instance of APISIX will be both the data plane and the control plane. + +![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png) + +There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. Both the data and the control planes connect to this conf server via HTTP. + +An example configuration of the traditional deployment mode is shown below: + +```yaml title="conf/config.yaml" +apisix: + node_listen: + - port: 9080 + admin_listen: + port: 9180 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://${IP}:${Port} + prefix: /apisix + timeout: 30 +``` + +The instance of APISIX deployed as the traditional role will: + +1. Listen on port `9080` to handle user requests, controlled by `node_listen`. +2. Listen on port `9180` to handle Admin API requests, controlled by `admin_listen`. + +## Decoupled + +In the decoupled deployment mode the data plane and control plane instances of APISIX are deployed separately. i.e one instance of APISIX is configured to be a data plane and the other to be a control plane. 
+ +![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png) + +The instance of APISIX deployed as the data plane will: + +1. Fetch the configuration from the control plane. The default port is `9280`. +2. Performs a health check on all configured control plane addresses before starting the service. + 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown. + 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service. + 3. If all control planes are normal, APISIX service is started normally. +3. Once the service is started, it will handle the user requests. + +The example below shows the configuration of an APISIX instance as data plane in the decoupled mode: + +```yaml title="conf/config.yaml" +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - ${IP}:9280 + timeout: 30 + certs: + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + trusted_ca_cert: /path/to/ca-cert +``` + +The instance of APISIX deployed as the control plane will: + +1. Listen on port `9180` and handle Admin API requests. +2. Provide the conf server which will listen on port `9280`. Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS. 
+ +The example below shows the configuration of an APISIX instance as control plane in the decoupled mode: + +```yaml title="conf/config.yaml" +deployment: + role: control_plane + role_control_plan: + config_provider: etcd + conf_server: + listen: 0.0.0.0:9280 + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + client_ca_cert: /path/to/ca-cert + etcd: + host: + - https://${IP}:${Port} + prefix: /apisix + timeout: 30 + certs: + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + trusted_ca_cert: /path/to/ca-cert +``` + +:::tip + +As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below: + +```yaml title="conf/config.yaml" +deployment: + role: control_plane + role_control_plan: + config_provider: etcd + conf_server: + listen: 0.0.0.0:9280 + cert: /path/to/ca-cert + cert_key: /path/to/ca-cert + etcd: + host: + - https://${IP}:${Port} + prefix: /apisix + timeout: 30 + certs: + trusted_ca_cert: /path/to/ca-cert +``` + +::: + +## Standalone + +In the standalone deployment mode, APISIX is deployed as a data plane and it reads in configurations from a YAML file (`apisix.yaml`) in the local file system. + +This deployment mode is useful when you have to declaratively define the configuration or when you are using a different configuration center other than etcd. 
+ +To configure APISIX in standalone mode: + +```yaml title="conf/config.yaml" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +``` diff --git a/docs/en/latest/discovery.md b/docs/en/latest/discovery.md index 5442c874eb29..9a7bbfef7742 100644 --- a/docs/en/latest/discovery.md +++ b/docs/en/latest/discovery.md @@ -51,7 +51,7 @@ It is very easy for APISIX to extend the discovery client, the basic steps are a First, create a directory `eureka` under `apisix/discovery`; -After that, add [`init.lua`](../../../apisix/discovery/eureka/init.lua) in the `apisix/discovery/eureka` directory; +After that, add [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua) in the `apisix/discovery/eureka` directory; Then implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes in `init.lua`: @@ -185,7 +185,7 @@ discovery: Here is an example of routing a request with a URL of "/user/*" to a service which named "user-service" and use eureka discovery client in the registry : ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/user/*", "upstream": { @@ -202,13 +202,13 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` Because the upstream interface URL may have conflict, usually in the 
gateway by prefix to distinguish: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/a/*", "plugins": { @@ -223,7 +223,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } }' -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/b/*", "plugins": { diff --git a/docs/en/latest/discovery/consul_kv.md b/docs/en/latest/discovery/consul_kv.md index 7826c66b753b..b370b6bcd38c 100644 --- a/docs/en/latest/discovery/consul_kv.md +++ b/docs/en/latest/discovery/consul_kv.md @@ -134,7 +134,7 @@ To avoid confusion, use the full consul key url path as service name in practice Here is an example of routing a request with a URL of "/*" to a service which named "http://127.0.0.1:8500/v1/kv/upstreams/webpages/" and use consul_kv discovery client in the registry : ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/*", "upstream": { @@ -166,8 +166,7 @@ The format response as below: "status": 1 }, "key": "/apisix/routes/1" - }, - "action": "set" + } } ``` diff --git a/docs/en/latest/discovery/control-plane-service-discovery.md b/docs/en/latest/discovery/control-plane-service-discovery.md new file mode 100644 index 000000000000..a18bdc63d38d --- /dev/null +++ b/docs/en/latest/discovery/control-plane-service-discovery.md @@ -0,0 +1,72 @@ +--- +title: Control Plane Service Discovery +keywords: + - API Geteway + - APISIX + - ZooKeeper + - Nacos + - APISIX-Seed +description: 
This documentation describes how to implement service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane. +--- + + + +This document describes how to implement service discovery with Nacos and Zookeeper on the APISIX Control Plane. + +## APISIX-Seed Architecture + +Apache APISIX has supported Data Plane service discovery in the early days, and now APISIX also supports Control Plane service discovery through the [APISIX-Seed](https://github.com/api7/apisix-seed) project. The following figure shows the APISIX-Seed architecture diagram. + +![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png) + +The steps represented by the numbers in the figure are as follows: + +1. Register an upstream with APISIX and specify the service discovery type. APISIX-Seed will watch APISIX resource changes in etcd, filter discovery types, and obtain service names. +2. APISIX-Seed subscribes to the specified service name in the service registry to obtain changes to the corresponding service. +3. After the client registers the service with the service registry, APISIX-Seed will obtain the new service information and write the updated service node into etcd; +4. When the corresponding resources in etcd change, APISIX worker will refresh the latest service node information to memory. + +:::note + +It should be noted that after the introduction of APISIX-Seed, if the service of the registry changes frequently, the data in etcd will also change frequently. So, it is best to set the `--auto-compaction` option when starting etcd to compress the history periodically to avoid etcd eventually exhausting its storage space. Please refer to [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions). + +::: + +## Why APISIX-Seed + +- Network topology becomes simpler + + APISIX does not need to maintain a network connection with each registry, and only needs to pay attention to the configuration information in etcd. 
This will greatly simplify the network topology. + +- Total data volume about upstream service becomes smaller + + Due to the characteristics of the registry, APISIX may store the full amount of registry service data in the worker, such as consul_kv. By introducing APISIX-Seed, each process of APISIX will not need to additionally cache upstream service-related information. + +- Easier to manage + + Service discovery configuration needs to be configured once per APISIX instance. By introducing APISIX-Seed, Apache APISIX will be indifferent to the configuration changes of the service registry. + +## Supported service registry + +ZooKeeper and Nacos are currently supported, and more service registries will be supported in the future. For more information, please refer to: [APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix). + +- If you want to enable control plane ZooKeeper service discovery, please refer to: [ZooKeeper Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md). + +- If you want to enable control plane Nacos service discovery, please refer to: [Nacos Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/nacos.md). diff --git a/docs/en/latest/discovery/kubernetes.md b/docs/en/latest/discovery/kubernetes.md index 0bf743128951..eb931ba35077 100644 --- a/docs/en/latest/discovery/kubernetes.md +++ b/docs/en/latest/discovery/kubernetes.md @@ -52,6 +52,8 @@ discovery: # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + default_weight: 50 # weight assigned to each discovered endpoint. 
default 50, minimum 0 + # kubernetes discovery plugin support use namespace_selector # you can use one of [equal, not_equal, match, not_match] filter namespace namespace_selector: diff --git a/docs/en/latest/discovery/nacos.md b/docs/en/latest/discovery/nacos.md index a35a2ac2890b..684098e7fea2 100644 --- a/docs/en/latest/discovery/nacos.md +++ b/docs/en/latest/discovery/nacos.md @@ -59,7 +59,7 @@ discovery: Here is an example of routing a request with an URI of "/nacos/*" to a service which named "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS" and use nacos discovery client in the registry: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacos/*", "upstream": { @@ -92,8 +92,7 @@ The formatted response as below: "priority": 0, "uri": "\/nacos\/*" } - }, - "action": "set" + } } ``` @@ -109,7 +108,7 @@ The formatted response as below: Example of routing a request with an URI of "/nacosWithNamespaceId/*" to a service which name, namespaceId "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns" and use nacos discovery client in the registry: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithNamespaceId/*", "upstream": { @@ -148,8 +147,7 @@ The formatted response as below: "priority": 0, "uri": "\/nacosWithNamespaceId\/*" } - }, - "action": "set" + } } ``` @@ -158,7 +156,7 @@ The formatted response as below: Example of routing a request with an URI of "/nacosWithGroupName/*" to a service which name, groupName 
"http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group" and use nacos discovery client in the registry: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithGroupName/*", "upstream": { @@ -197,8 +195,7 @@ The formatted response as below: "priority": 0, "uri": "\/nacosWithGroupName\/*" } - }, - "action": "set" + } } ``` @@ -207,7 +204,7 @@ The formatted response as below: Example of routing a request with an URI of "/nacosWithNamespaceIdAndGroupName/*" to a service which name, namespaceId, groupName "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group" and use nacos discovery client in the registry: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithNamespaceIdAndGroupName/*", "upstream": { @@ -248,7 +245,6 @@ The formatted response as below: "priority": 0, "uri": "\/nacosWithNamespaceIdAndGroupName\/*" } - }, - "action": "set" + } } ``` diff --git a/docs/en/latest/discovery/zookeeper.md b/docs/en/latest/discovery/zookeeper.md deleted file mode 100644 index 3adf52dc9735..000000000000 --- a/docs/en/latest/discovery/zookeeper.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: zookeeper ---- - - - -## Service Discovery Via Zookeeper - -`Zookeeper` service discovery needs to rely on the [apisix-seed](https://github.com/api7/apisix-seed) project. - -### How `apisix-seed` Works - -![APISIX-SEED](../../../assets/images/apisix-seed.svg) - -`apisix-seed` completes data exchange by watching the changes of `etcd` and `zookeeper` at the same time. 
- -The process is as follows: - -1. `APISIX` registers an upstream and specifies the service discovery type as `zookeeper` to `etcd`. -2. `apisix-seed` watches the resource changes of `APISIX` in `etcd` and filters the discovery type and obtains the service name. -3. `apisix-seed` binds the service to the `etcd` resource and starts watching the service in zookeeper. -4. The client registers the service with `zookeeper`. -5. `apisix-seed` gets the service changes in `zookeeper`. -6. `apisix-seed` queries the bound `etcd` resource information through the service name, and writes the updated service node to `etcd`. -7. The `APISIX` worker watches `etcd` changes and refreshes the service node information to the memory. - -### Setting `apisix-seed` and Zookeeper - -The configuration steps are as follows: - -1. Start the Zookeeper service - -```bash -docker run -itd --rm --name=dev-zookeeper -p 2181:2181 zookeeper:3.7.0 -``` - -2. Download and compile the `apisix-seed` project. - -```bash -git clone https://github.com/api7/apisix-seed.git -cd apisix-seed -go build -``` - -3. Modify the `apisix-seed` configuration file, config path `conf/conf.yaml`. - -```bash -etcd: # APISIX ETCD Configure - host: - - "http://127.0.0.1:2379" - prefix: /apisix - timeout: 30 - -discovery: - zookeeper: # Zookeeper Service Discovery - hosts: - - "127.0.0.1:2181" # Zookeeper service address - prefix: /zookeeper - weight: 100 # default weight for node - timeout: 10 # default 10s -``` - -4. Start `apisix-seed` to monitor service changes - -```bash -./apisix-seed -``` - -### Setting `APISIX` Route and Upstream - -Set a route, the request path is `/zk/*`, the upstream uses zookeeper as service discovery, and the service name -is `APISIX-ZK`. 
- -```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ --H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' -{ - "uri": "/zk/*", - "upstream": { - "service_name": "APISIX-ZK", - "type": "roundrobin", - "discovery_type": "zookeeper" - } -}' -``` - -### Register Service and verify Request - -1. Service registration using Zookeeper CLI - -- Register Service - -```bash -# Login Container -docker exec -it ${CONTAINERID} /bin/bash -# Login Zookeeper Client -oot@ae2f093337c1:/apache-zookeeper-3.7.0-bin# ./bin/zkCli.sh -# Register Service -[zk: localhost:2181(CONNECTED) 0] create /zookeeper/APISIX-ZK '{"host":"127.0.0.1:1980","weight":100}' -``` - -- Successful Response - -```bash -Created /zookeeper/APISIX-ZK -``` - -2. Verify Request - -- Request - -```bash -curl -i http://127.0.0.1:9080/zk/hello -``` - -- Response - -```bash -HTTP/1.1 200 OK -Connection: keep-alive -Content-Type: text/html; charset=utf-8 -Date: Tue, 29 Mar 2022 08:51:28 GMT -Server: APISIX/2.12.0 -Transfer-Encoding: chunked - -hello -``` diff --git a/docs/en/latest/getting-started.md b/docs/en/latest/getting-started.md index 8b393fcc828e..5191b27b02d7 100644 --- a/docs/en/latest/getting-started.md +++ b/docs/en/latest/getting-started.md @@ -162,7 +162,7 @@ You can check out [Installing Apache APISIX](./installation-guide.md) for differ :::info IMPORTANT -Make sure that all the required ports (default: 9080, 9443 and 2379) are available and not used by other system processes. +Make sure that all the required ports (default: 9080, 9180, 9443 and 2379) are available and not used by other system processes. On Unix-based systems, you can run the command below to terminate a process listening on a specific port: @@ -181,7 +181,7 @@ docker logs -f --tail Once APISIX is running, you can use `curl` to access the Admin API. You can also check if APISIX is running properly by running this command and checking the response. 
```bash -curl "http://127.0.0.1:9080/apisix/admin/services/" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +curl "http://127.0.0.1:9180/apisix/admin/services/" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ``` This response indicates that APISIX is running successfully: @@ -189,7 +189,6 @@ This response indicates that APISIX is running successfully: ```json { "count":0, - "action":"get", "node":{ "key":"/apisix/services", "nodes":[], @@ -207,7 +206,7 @@ APISIX provides a powerful [Admin API](./admin-api.md) and [APISIX Dashboard](ht We will configure the Route so that APISIX can forward the request to the corresponding Upstream service: ```bash -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "methods": ["GET"], "host": "example.com", @@ -242,7 +241,7 @@ Instead of configuring the Upstream directly to the Route, you can create an Ups To create an Upstream object: ```bash -curl "http://127.0.0.1:9080/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "type": "roundrobin", "nodes": { @@ -256,7 +255,7 @@ This is the same as the Upstream service we configured directly into the Route o To bind this Upstream to the Route, we can use the `upstream_id` as `1`: ```bash -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "methods": ["GET"], "host": "example.com", diff --git a/docs/en/latest/grpc-proxy.md b/docs/en/latest/grpc-proxy.md index 5446263919d3..ffc7118c04da 100644 --- a/docs/en/latest/grpc-proxy.md +++ b/docs/en/latest/grpc-proxy.md @@ -41,7 +41,7 @@ Here's an 
example, to proxying gRPC service by specified route: * the grpc server example:[grpc_server_example](https://github.com/api7/grpc_server_example) ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["POST", "GET"], "uri": "/helloworld.Greeter/SayHello", @@ -99,7 +99,7 @@ This means that the proxying is working. If your gRPC service encrypts with TLS by itself (so called `gPRCS`, gPRC + TLS), you need to change the `scheme` to `grpcs`. The example above runs gRPCS service on port 50052, to proxy gRPC request, we need to use the configuration below: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["POST", "GET"], "uri": "/helloworld.Greeter/SayHello", diff --git a/docs/en/latest/health-check.md b/docs/en/latest/health-check.md index 3f16d09309bf..5e0f7a26a121 100644 --- a/docs/en/latest/health-check.md +++ b/docs/en/latest/health-check.md @@ -64,7 +64,7 @@ it whether this unique node is healthy or not. 
### Configuration example ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { diff --git a/docs/en/latest/install-dependencies.md b/docs/en/latest/install-dependencies.md index 08ada40c185c..0deacec48c37 100644 --- a/docs/en/latest/install-dependencies.md +++ b/docs/en/latest/install-dependencies.md @@ -31,8 +31,6 @@ title: Install Dependencies - On some platforms, installing LuaRocks via the package manager will cause Lua to be upgraded to Lua 5.3, so we recommend installing LuaRocks via source code. if you install OpenResty and its OpenSSL develop library (openresty-openssl111-devel for rpm and openresty-openssl111-dev for deb) via the official repository, then [we provide a script for automatic installation](https://github.com/apache/apisix/blob/master/utils/linux-install-luarocks.sh). If you compile OpenResty yourself, you can refer to the above script and change the path in it. If you don't specify the OpenSSL library path when you compile, you don't need to configure the OpenSSL variables in LuaRocks, because the system's OpenSSL is used by default. If the OpenSSL library is specified at compile time, then you need to ensure that LuaRocks' OpenSSL configuration is consistent with OpenResty's. -- WARNING: If you are using OpenResty which is older than `1.17.8`, please installing openresty-openss-devel instead of openresty-openssl111-devel. - - OpenResty is a dependency of APISIX. If it is your first time to deploy APISIX and you don't need to use OpenResty to deploy other services, you can stop and disable OpenResty after installation since it will not affect the normal work of APISIX. Please operate carefully according to your service. For example in Ubuntu: `systemctl stop openresty && systemctl disable openresty`. 
## Install diff --git a/docs/en/latest/installation-guide.md b/docs/en/latest/installation-guide.md index 2384ba31b191..a383d2505196 100644 --- a/docs/en/latest/installation-guide.md +++ b/docs/en/latest/installation-guide.md @@ -43,6 +43,7 @@ APISIX can be installed by the different methods listed below: {label: 'Docker', value: 'docker'}, {label: 'Helm', value: 'helm'}, {label: 'RPM', value: 'rpm'}, + {label: 'Source Code', value: 'source code'}, ]}> @@ -166,6 +167,12 @@ Run `apisix help` to get a list of all available operations. ::: + + + + +If you want to build APISIX from source, please refer to [Building APISIX from source](./building-apisix.md). + @@ -185,7 +192,7 @@ It would be installed automatically if you choose the Docker or Helm install met ```shell -ETCD_VERSION='3.4.18' +ETCD_VERSION='3.5.4' wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ cd etcd-v${ETCD_VERSION}-linux-amd64 && \ @@ -207,6 +214,57 @@ brew services start etcd ## Next steps +### Configuring APISIX + +You can configure your APISIX deployment in two ways: + +1. By directly changing your configuration file (`conf/config.yaml`). +2. By using the `--config` or the `-c` flag to pass the path to your configuration file while starting APISIX. + + ```shell + apisix start -c + ``` + +APISIX will use the configurations added in this configuration file and will fall back to the default configuration if anything is not configured. + +For example, to configure the default listening port to be `8000` without changing other configurations, your configuration file could look like this: + +```yaml title="conf/config.yaml" +apisix: + node_listen: 8000 +``` + +Now, if you decide you want to change the etcd address to `http://foo:2379`, you can add it to your configuration file. This will not change other configurations. 
+ +```yaml title="conf/config.yaml" +apisix: + node_listen: 8000 + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://foo:2379" +``` + +:::warning + +APISIX's default configuration can be found in `conf/config-default.yaml` file and it should not be modified. It is bound to the source code and the configuration should only be changed by the methods mentioned above. + +::: + +:::warning + +The `conf/nginx.conf` file is automatically generated and should not be modified. + +::: + +### APISIX deployment modes + +APISIX has three different deployment modes for different use cases. To learn more and configure deployment modes, see the [documentation](./deployment-modes.md). + ### Updating Admin API key It is recommended to modify the Admin API key to ensure security. @@ -225,7 +283,7 @@ apisix: Now, to access the Admin API, you can use the new key: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes?api_key=newsupersecurekey -i +curl http://127.0.0.1:9180/apisix/admin/routes?api_key=newsupersecurekey -i ``` ### Adding APISIX systemd unit file diff --git a/docs/en/latest/internal/testing-framework.md b/docs/en/latest/internal/testing-framework.md index 36dd31d6e6c1..846b2fa5545b 100644 --- a/docs/en/latest/internal/testing-framework.md +++ b/docs/en/latest/internal/testing-framework.md @@ -291,7 +291,7 @@ The default log level is `info`, but you can get the debug level log with `-- lo The test framework listens to multiple ports when it is started. -* 1980/1981/1982/5044: HTTP upstream port +* 1980/1981/1982/5044: HTTP upstream port. We provide a mock upstream server for testing. See below for more information. * 1983: HTTPS upstream port * 1984: APISIX HTTP port. Can be used to verify HTTP related gateway logic, such as concurrent access to an API. * 1985: APISIX TCP port. Can be used to verify TCP related gateway logic, such as concurrent access to an API. 
@@ -309,7 +309,7 @@ Note that before adding new methods to `t/lib/server.lua`, make sure that you ca Assume your current work directory is the root of the apisix source code. -1. Install our fork of [test-nginx](https://github.com/openresty/test-nginx) to `../test-nginx`. +1. Git clone the latest [test-nginx](https://github.com/openresty/test-nginx) to `../test-nginx`. 2. Run the test: `prove -I. -I../test-nginx/inc -I../test-nginx/lib -r t/path/to/file.t`. ## Tips @@ -351,7 +351,7 @@ ONLY: --- config ... --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/upstreams","nodes":[]}} +{"count":0,"node":{"dir":true,"key":"/apisix/upstreams","nodes":[]}} ``` ### Executing Shell Commands diff --git a/docs/en/latest/mtls.md b/docs/en/latest/mtls.md index b46e7d7b81e2..2cf7a87efd4b 100644 --- a/docs/en/latest/mtls.md +++ b/docs/en/latest/mtls.md @@ -36,7 +36,9 @@ The clients will provide their certificates to the server and the server will ch 2. Modify configuration items in `conf/config.yaml`: ```yaml - port_admin: 9180 + admin_listen: + ip: 127.0.0.1 + port: 9180 https_admin: true admin_api_mtls: @@ -66,13 +68,17 @@ curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert / ### How to configure -You need to build [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment?) and configure `etcd.tls` section if you want APISIX to work on an etcd cluster with mTLS enabled. +You need to build [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment) and configure `etcd.tls` section if you want APISIX to work on an etcd cluster with mTLS enabled. 
```yaml -etcd: - tls: - cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client - key: /data/certs/etcd_client.key # path of key used by the etcd client +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + tls: + cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client + key: /data/certs/etcd_client.key # path of key used by the etcd client ``` If APISIX does not trust the CA certificate that used by etcd server, we need to set up the CA certificate. @@ -126,7 +132,7 @@ if len(sys.argv) >= 5: reqParam["client"]["ca"] = clientCert if len(sys.argv) >= 6: reqParam["client"]["depth"] = int(sys.argv[5]) -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json=reqParam, headers={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json=reqParam, headers={ "X-API-KEY": api_key, }) print(resp.status_code) @@ -183,7 +189,7 @@ reqParam = { }, } -resp = requests.patch("http://127.0.0.1:9080/apisix/admin/upstreams/"+id, json=reqParam, headers={ +resp = requests.patch("http://127.0.0.1:9180/apisix/admin/upstreams/"+id, json=reqParam, headers={ "X-API-KEY": api_key, }) print(resp.status_code) diff --git a/docs/en/latest/plugin-develop.md b/docs/en/latest/plugin-develop.md index 631130ad7f3c..ca1d3a23f019 100644 --- a/docs/en/latest/plugin-develop.md +++ b/docs/en/latest/plugin-develop.md @@ -90,7 +90,7 @@ nginx_config: ``` The plugin itself provides the init method. It is convenient for plugins to perform some initialization after -the plugin is loaded. +the plugin is loaded. If you need to clean up the initialization, you can put it in the corresponding destroy method. 
Note : if the dependency of some plugin needs to be initialized when Nginx start, you may need to add logic to the initialization method "http_init" in the file __apisix/init.lua__, and you may need to add some processing on generated part of Nginx @@ -334,7 +334,7 @@ end Write the logic of the plugin in the corresponding phase. There are two parameters `conf` and `ctx` in the phase method, take the `limit-conn` plugin configuration as an example. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -516,7 +516,7 @@ The above test case represents a simple scenario. Most scenarios will require mu Additionally, there are some convenience testing endpoints which can be found [here](https://github.com/apache/apisix/blob/master/t/lib/server.lua#L36). For example, see [proxy-rewrite](https://github.com/apache/apisix/blob/master/t/plugin/proxy-rewrite.lua). In test 42, the upstream `uri` is made to redirect `/test?new_uri=hello` to `/hello` (which always returns `hello world`). In test 43, the response body is confirmed to equal `hello world`, meaning the proxy-rewrite configuration added with test 42 worked correctly. -Refer the following [document](how-to-build.md) to setup the testing framework. +Refer the following [document](building-apisix.md) to setup the testing framework. ### attach the test-nginx execution process: diff --git a/docs/en/latest/plugins/api-breaker.md b/docs/en/latest/plugins/api-breaker.md index 4469b5a31d40..63632a6be825 100644 --- a/docs/en/latest/plugins/api-breaker.md +++ b/docs/en/latest/plugins/api-breaker.md @@ -2,10 +2,9 @@ title: api-breaker keywords: - APISIX - - Plugin + - API Gateway - API Breaker - - api-breaker -description: This document contains information about the Apache APISIX api-breaker Plugin. 
+description: This document describes the information about the Apache APISIX api-breaker Plugin, you can use it to protect Upstream services. --- + +## Description + +The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage. + +When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Description | +| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ | +| endpoint_addr | string | True | | Elasticsearch API. | +| field | array | True | | Elasticsearch `field` configuration. | +| field.index | string | True | | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field). | +| field.type | string | False | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field). | +| auth | array | False | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration. | +| auth.username | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username. | +| auth.password | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password. 
| +| ssl_verify | boolean | False | true | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | +| timeout | integer | False | 10 | Elasticsearch send data timeout in seconds. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Enabling the Plugin + +### Full configuration + +The example below shows a complete configuration of the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services", + "type":"collector" + }, + "auth":{ + "username":"elastic", + "password":"123456" + }, + "ssl_verify":false, + "timeout": 60, + "retry_delay":1, + "buffer_duration":60, + "max_retry_count":0, + "batch_max_size":1000, + "inactive_timeout":5, + "name":"elasticsearch-logger" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +### Minimal configuration example + +The example below shows a bare minimum configuration of the Plugin on a Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services" + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +## Example usage + +Once you have configured the Route to use the Plugin, 
when you make a request to APISIX, it will be logged in your Elasticsearch server: + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +You should be able to get the log from elasticsearch: + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "M1qAxYIBRmRqWkmH4Wya", + "_score": 1, + "_source": { + "apisix_latency": 0, + "route_id": "1", + "server": { + "version": "2.15.0", + "hostname": "apisix" + }, + "request": { + "size": 102, + "uri": "/elasticsearch.do?q=hello", + "querystring": { + "q": "hello" + }, + "headers": { + "user-agent": "curl/7.29.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/elasticsearch.do?q=hello", + "method": "GET" + }, + "service_id": "", + "latency": 0, + "upstream": "127.0.0.1:1980", + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "start_time": 1661170929107, + "response": { + "size": 192, + "headers": { + "date": "Mon, 22 Aug 2022 12:22:09 GMT", + "server": "APISIX/2.15.0", + "content-type": "text/plain; charset=utf-8", + "connection": "close", + "transfer-encoding": "chunked" + }, + "status": 200 + } + } + } + ] + } +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. 
[APISIX](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `elasticsearch-logger` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + + make a request to APISIX again: + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +You should be able to get this log from elasticsearch: + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1, + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "NVqExYIBRmRqWkmH4WwG", + "_score": 1, + "_source": { + "@timestamp": "2022-08-22T20:26:31+08:00", + "client_ip": "127.0.0.1", + "host": "127.0.0.1", + "route_id": "1" + } + } + ] + } +} +``` + +### Disable Metadata + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +``` + +## Disable Plugin + +To disable the `elasticsearch-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{}, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` diff --git a/docs/en/latest/plugins/error-log-logger.md b/docs/en/latest/plugins/error-log-logger.md index 57fc5732e4a3..a37a7a5ffbbd 100644 --- a/docs/en/latest/plugins/error-log-logger.md +++ b/docs/en/latest/plugins/error-log-logger.md @@ -1,7 +1,12 @@ --- title: error-log-logger +keywords: + - APISIX + - API Gateway + - Plugin + - Error log logger +description: This document contains information about the Apache APISIX error-log-logger Plugin. --- - + +## Description + +The `ext-plugin-post-resp` Plugin is for running specific external Plugins in the Plugin Runner before executing the built-in Lua Plugins. + +The `ext-plugin-post-resp` plugin will be executed after the request gets a response from the upstream. + +After enabling this plugin, APISIX will use the [lua-resty-http](https://github.com/api7/lua-resty-http) library to make requests to the upstream, this results in: + +- [proxy-control](./proxy-control.md) plugin is not available +- [proxy-mirror](./proxy-mirror.md) plugin is not available +- [proxy-cache](./proxy-cache.md) plugin is not available +- [mTLS Between APISIX and Upstream](../mtls.md#mtls-between-apisix-and-upstream) function is not available yet + +See [External Plugin](../external-plugin.md) to learn more. + +:::note + +Execution of External Plugins will affect the response of the current request. + +External Plugin does not yet support getting request context information. + +External Plugin does not yet support getting the response body of an upstream response. 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------|---------|----------|---------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------| +| conf | array | False | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | List of Plugins and their configurations to be executed on the Plugin Runner. | +| allow_degradation | boolean | False | false | | Sets Plugin degradation when the Plugin Runner is not available. When set to `true`, requests are allowed to continue. | + +## Enabling the Plugin + +The example below enables the `ext-plugin-post-resp` Plugin on a specific Route: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-post-resp": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the External Plugin as shown above, you can make a request to execute the Plugin: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +This will reach the configured Plugin Runner and the `ext-plugin-A` will be executed. + +## Disable Plugin + +To disable the `ext-plugin-post-resp` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/ext-plugin-pre-req.md b/docs/en/latest/plugins/ext-plugin-pre-req.md index efe2dbf8a8aa..3992d059eeed 100644 --- a/docs/en/latest/plugins/ext-plugin-pre-req.md +++ b/docs/en/latest/plugins/ext-plugin-pre-req.md @@ -50,7 +50,7 @@ Execution of External Plugins will affect the behavior of the current request. The example below enables the `ext-plugin-pre-req` Plugin on a specific Route: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -83,7 +83,7 @@ This will reach the configured Plugin Runner and the `ext-plugin-A` will be exec To disable the `ext-plugin-pre-req` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream": { diff --git a/docs/en/latest/plugins/fault-injection.md b/docs/en/latest/plugins/fault-injection.md index 296786a28237..4aa20c6b9582 100644 --- a/docs/en/latest/plugins/fault-injection.md +++ b/docs/en/latest/plugins/fault-injection.md @@ -78,7 +78,7 @@ This means that the relationship between the first two expressions is AND, and t You can enable the `fault-injection` Plugin on a specific Route as shown below: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -101,7 +101,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 Similarly, to enable a `delay` fault: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -123,7 +123,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 You can also enable the Plugin with both `abort` and `delay` which can have `vars` for matching: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -202,7 +202,7 @@ sys 0m0.010s You can enable the `fault-injection` Plugin with the `vars` attribute to set specific rules: ```shell -curl 
http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -268,7 +268,7 @@ Fault Injection! To disable the `fault-injection` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, diff --git a/docs/en/latest/plugins/file-logger.md b/docs/en/latest/plugins/file-logger.md index 27bc93d69089..634f7e178e90 100644 --- a/docs/en/latest/plugins/file-logger.md +++ b/docs/en/latest/plugins/file-logger.md @@ -1,5 +1,11 @@ --- title: file-logger +keywords: + - APISIX + - API Gateway + - Plugin + - File Logger +description: This document contains information about the Apache APISIX file-logger Plugin. --- - ## Example usage -The example above configures the Plugin to only allow one concurrent request. When more than one request is received, the Plugin will respond with a 503 status code: +The example above configures the Plugin to only allow one connection on this route. When more than one request is received, the Plugin will respond with a `503` HTTP status code and reject the connection: -```bash +```shell curl -i http://127.0.0.1:9080/index.html?sleep=20 & curl -i http://127.0.0.1:9080/index.html?sleep=20 ``` -```bash +```shell + +503 Service Temporarily Unavailable + +

503 Service Temporarily Unavailable

+
openresty
+ + +``` + +## Limit the number of concurrent WebSocket connections + +Apache APISIX supports WebSocket proxy, we can use `limit-conn` plugin to limit the number of concurrent WebSocket connections. + +1. Create a Route, enable the WebSocket proxy and the `limit-conn` plugin. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/ws", + "enable_websocket": true, + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +The above route enables the WebSocket proxy on `/ws`, and limits the number of concurrent WebSocket connections to 1. More than 1 concurrent WebSocket connection will return `503` to reject the request. + +2. Initiate a WebSocket request, and the connection is established successfully. + +```shell +curl --include \ + --no-buffer \ + --header "Connection: Upgrade" \ + --header "Upgrade: websocket" \ + --header "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==" \ + --header "Sec-WebSocket-Version: 13" \ + --http1.1 \ + http://127.0.0.1:9080/ws +``` + +```shell +HTTP/1.1 101 Switching Protocols +``` + +3. Initiate the WebSocket request again in another terminal, the request will be rejected. + +```shell +HTTP/1.1 503 Service Temporarily Unavailable +··· 503 Service Temporarily Unavailable @@ -125,8 +188,9 @@ curl -i http://127.0.0.1:9080/index.html?sleep=20 To disable the `limit-conn` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
-```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index 20dbfbc97dda..0a74b24e8c99 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -58,7 +58,7 @@ The `limit-count` Plugin limits the number of requests to your service by a give You can enable the Plugin on a Route as shown below: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -82,7 +82,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 You can also configure the `key_type` to `var_combination` as shown: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -108,7 +108,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 You can also create a group to share the same counter across multiple Routes: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -131,7 +131,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f0343 Now every Route which belongs to group `services_1#1640140620` (or the 
service with ID `1`) will share the same counter. ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "service_id": "1", "uri": "/hello" @@ -139,7 +139,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 ``` ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "service_id": "1", "uri": "/hello2" @@ -157,7 +157,7 @@ HTTP/1.1 200 ... You can also share the same limit counter for all your requests by setting the `key_type` to `constant`: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/services/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -185,7 +185,7 @@ For cluster-level traffic limiting, you can use a Redis server. 
The counter will The example below shows how you can use the `redis` policy: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -214,7 +214,7 @@ curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 Similarly you can also configure the `redis-cluster` policy: ```bash -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -294,7 +294,7 @@ Server: APISIX web server To disable the `limit-count` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```bash -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/plugins/limit-req.md b/docs/en/latest/plugins/limit-req.md index 5760aa5577fb..1911a202786c 100644 --- a/docs/en/latest/plugins/limit-req.md +++ b/docs/en/latest/plugins/limit-req.md @@ -2,10 +2,10 @@ title: limit-req keywords: - APISIX - - Plugin + - API Gateway - Limit Request - limit-req -description: This document contains information about the Apache APISIX limit-req Plugin. +description: The limit-req Plugin limits the number of requests to your service using the leaky bucket algorithm. --- + +## Description + +The `openfunction` Plugin is used to integrate APISIX with [CNCF OpenFunction](https://openfunction.dev/) serverless platform. 
+ +This Plugin can be configured on a Route and requests will be sent to the configured OpenFunction API endpoint as the upstream. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| --------------------------- | ------- | -------- | ------- | ------------ | ---------------------------------------------------------------------------------------------------------- | +| function_uri | string | True | | | function uri. For example, `https://localhost:30858/default/function-sample`. | +| ssl_verify | boolean | False | true | | When set to `true` verifies the SSL certificate. | +| authorization | object | False | | | Authorization credentials to access functions of OpenFunction. | +| authorization.service_token | string | False | | | The token format is 'xx:xx' which supports basic auth for function entry points. | +| timeout | integer | False | 3000 ms | [100, ...] ms| OpenFunction action and HTTP call timeout in ms. | +| keepalive | boolean | False | true | | When set to `true` keeps the connection alive for reuse. | +| keepalive_timeout | integer | False | 60000 ms| [1000,...] ms| Time is ms for connection to remain idle without closing. | +| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. | + +:::note + +The `timeout` attribute sets the time taken by the OpenFunction to execute, and the timeout for the HTTP client in APISIX. OpenFunction calls may take time to pull the runtime image and start the container. So, if the value is set too small, it may cause a large number of requests to fail. + +::: + +## Prerequisites + +Before configuring the plugin, you need to have OpenFunction running. +Installation of OpenFunction requires a certain version Kubernetes cluster. +For details, please refer to [Installation](https://openfunction.dev/docs/getting-started/installation/). 
+ +### Create and Push a Function + +You can then create a function following the [sample](https://github.com/OpenFunction/samples). + +You'll need to push your function container image to a container registry like Docker Hub or Quay.io when building a function. To do that, you'll need to generate a secret for your container registry first. + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER=${your_registry_user} REGISTRY_PASSWORD=${your_registry_password} +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## Enable the Plugin + +You can now configure the Plugin on a specific Route and point to this running OpenFunction service: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## Example usage + +Once you have configured the plugin, you can send a request to the Route and it will invoke the configured function: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +This will give back the response from the function: + +``` +hello, test! +``` + +### Configure Path Transforming + +The `OpenFunction` Plugin also supports transforming the URL path while proxying requests to the OpenFunction API endpoints. Extensions to the base request path get appended to the `function_uri` specified in the Plugin configuration. + +:::info IMPORTANT + +The `uri` configured on a Route must end with `*` for this feature to work properly. APISIX Routes are matched strictly and the `*` implies that any subpath to this URI would be matched to the same Route. 
+ +::: + +The example below configures this feature: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +Now, any requests to the path `hello/123` will invoke the OpenFunction, and the added path is forwarded: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## Disable Plugin + +To disable the `openfunction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md index 5b33e5d53ad0..56f20e75b777 100644 --- a/docs/en/latest/plugins/openid-connect.md +++ b/docs/en/latest/plugins/openid-connect.md @@ -2,10 +2,10 @@ title: openid-connect keywords: - APISIX - - Plugin + - API Gateway - OpenID Connect - - openid-connect -description: This document contains information about the Apache APISIX openid-connect Plugin. + - OIDC +description: OpenID Connect allows the client to obtain user information from the identity providers, such as Keycloak, Ory Hydra, Okta, Auth0, etc. API Gateway APISIX supports to integrate with the above identity providers to protect your APIs. --- -## How to fetch the metric data +## Fetching metrics -We fetch the metric data from the specified url `/apisix/prometheus/metrics`. 
+You can fetch the metrics from the specified export URI (default: `/apisix/prometheus/metrics`): -``` +```shell curl -i http://127.0.0.1:9091/apisix/prometheus/metrics ``` -Puts this URL address into prometheus, and it will automatically fetch -these metric data. - -For example like this: +You can add this address to Prometheus to fetch the data: ```yaml scrape_configs: - - job_name: 'apisix' + - job_name: "apisix" scrape_interval: 15s # This value will be related to the time range of the rate function in Prometheus QL. The time range in the rate function should be at least twice this value. - metrics_path: '/apisix/prometheus/metrics' + metrics_path: "/apisix/prometheus/metrics" static_configs: - - targets: ['127.0.0.1:9091'] + - targets: ["127.0.0.1:9091"] ``` -And we can check the status at prometheus console: +Now, you will be able to check the status in your Prometheus console: ![checking status on prometheus dashboard](../../../assets/images/plugin/prometheus01.png) ![prometheus apisix in-depth metric view](../../../assets/images/plugin/prometheus02.png) -## How to specify export uri - -We can change the default export uri in the `plugin_attr` section of `conf/config.yaml`. +## Using Grafana to graph the metrics -| Name | Type | Default | Description | -| ---------- | ------ | ---------------------------- | --------------------------------- | -| export_uri | string | "/apisix/prometheus/metrics" | uri to get the prometheus metrics | +Metrics exported by the `prometheus` Plugin can be graphed in Grafana using a drop in dashboard. -Here is an example: - -```yaml -plugin_attr: - prometheus: - export_uri: /apisix/metrics -``` - -## Grafana dashboard - -Metrics exported by the plugin can be graphed in Grafana using a drop in dashboard. 
- -Downloads [Grafana dashboard meta](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) and imports it to Grafana。 - -Or you can goto [Grafana official](https://grafana.com/grafana/dashboards/11719) for `Grafana` meta data. +To set it up, download [Grafana dashboard meta](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) and import it in Grafana. Or, you can go to [Grafana official](https://grafana.com/grafana/dashboards/11719) for Grafana metadata. ![Grafana chart-1](../../../assets/images/plugin/grafana-1.png) @@ -153,52 +181,58 @@ Or you can goto [Grafana official](https://grafana.com/grafana/dashboards/11719) ## Available HTTP metrics -* `Status codes`: HTTP status code returned from upstream services. These status code available per service and across all services. +The following metrics are exported by the `prometheus` Plugin: - Attributes: +- Status code: HTTP status code returned from Upstream services. They are available for a single service and across all services. - | Name | Description | - | -------------| --------------------| - | code | The HTTP status code returned by the upstream service. | - | route | The `route_id` of the matched route is request. If it does not match, the default value is an empty string. | - | matched_uri | The `uri` of the route matching the request, if it does not match, the default value is an empty string. | - | matched_host | The `host` of the route that matches the request. If it does not match, the default value is an empty string. | - | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. | - | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. | - | node | The `ip` of the upstream node. 
| + The available attributes are: -* `Bandwidth`: Total Bandwidth (egress/ingress) flowing through APISIX. The total bandwidth of per service can be counted. + | Name | Description | + |--------------|-------------------------------------------------------------------------------------------------------------------------------| + | code | HTTP status code returned by the upstream service. | + | route | `route_id` of the matched Route with request. Defaults to an empty string if the Routes don't match. | + | matched_uri | `uri` of the Route matching the request. Defaults to an empty string if the Routes don't match. | + | matched_host | `host` of the Route matching the request. Defaults to an empty string if the Routes don't match. | + | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. | + | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. | + | node | IP address of the Upstream node. | - Attributes: +- Bandwidth: Total amount of traffic (ingress and egress) flowing through APISIX. Total bandwidth of a service can also be obtained. - | Name | Description | - | -------------| ------------- | - | type | The type of bandwidth(egress/ingress). | - | route | The `route_id` of the matched route is request. If it does not match, the default value is an empty string.. | - | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. | - | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. | - | node | The `ip` of the upstream node. | + The available attributes are: -* `etcd reachability`: A gauge type with a value of 0 or 1, representing if etcd can be reached by a APISIX or not, where `1` is available, and `0` is unavailable. 
-* `Connections`: Various Nginx connection metrics like active, reading, writing, and number of accepted connections. -* `Batch process entries`: A gauge type, when we use plugins and the plugin used batch process to send data, such as: sys logger, http logger, sls logger, tcp logger, udp logger and zipkin, then the entries which hasn't been sent in batch process will be counted in the metrics. -* `Latency`: The per service histogram of request time in different dimensions. + | Name | Description | + |----------|-------------------------------------------------------------------------------------------------------------------------------| + | type | Type of traffic (egress/ingress). | + | route | `route_id` of the matched Route with request. Defaults to an empty string if the Routes don't match. | + | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. | + | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. | + | node | IP address of the Upstream node. | + +- etcd reachability: A gauge type representing whether etcd can be reached by APISIX. A value of `1` represents reachable and `0` represents unreachable. +- Connections: Nginx connection metrics like active, reading, writing, and number of accepted connections. +- Batch process entries: A gauge type useful when Plugins like [syslog](./syslog.md), [http-logger](./http-logger.md), [tcp-logger](./tcp-logger.md), [udp-logger](./udp-logger.md), and [zipkin](./zipkin.md) use batch process to send data. Entries that hasn't been sent in batch process will be counted in the metrics. +- Latency: Histogram of the request time per service in different dimensions. 
- Attributes: + The available attributes are: - | Name | Description | - | ----------| ------------- | - | type | The value can be `apisix`, `upstream` or `request`, which means http latency caused by apisix, upstream, or their sum. | - | service | The `service_id` of the route matched by the request. When the route lacks service_id, the default is `$host`. | - | consumer | The `consumer_name` of the consumer that matches the request. If it does not match, the default value is an empty string. | - | node | The `ip` of the upstream node. | + | Name | Description | + |----------|-------------------------------------------------------------------------------------------------------------------------------------| + | type | Value can be one of `apisix`, `upstream`, or `request`. This translates to latency caused by APISIX, Upstream, or both (their sum). | + | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. | + | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. | + | node | IP address of the Upstream node. | -* `Info`: the information of APISIX node. +- Info: Information about the APISIX node. +- Shared dict: The capacity and free space of all nginx.shared.DICT in APISIX. 
-Here is the original metric data of APISIX: +Here are the original metrics from APISIX: + +```shell +curl http://127.0.0.1:9091/apisix/prometheus/metrics +``` ```shell -$ curl http://127.0.0.1:9091/apisix/prometheus/metrics # HELP apisix_bandwidth Total bandwidth in bytes consumed per service in Apisix # TYPE apisix_bandwidth counter apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417 @@ -262,15 +296,32 @@ apisix_http_latency_bucket{type="upstream",route="1",service="",consumer="",node # HELP apisix_node_info Info of APISIX node # TYPE apisix_node_info gauge apisix_node_info{hostname="desktop-2022q8f-wsl"} 1 +# HELP apisix_shared_dict_capacity_bytes The capacity of each nginx shared DICT since APISIX start +# TYPE apisix_shared_dict_capacity_bytes gauge +apisix_shared_dict_capacity_bytes{name="access-tokens"} 1048576 +apisix_shared_dict_capacity_bytes{name="balancer-ewma"} 10485760 +apisix_shared_dict_capacity_bytes{name="balancer-ewma-last-touched-at"} 10485760 +apisix_shared_dict_capacity_bytes{name="balancer-ewma-locks"} 10485760 +apisix_shared_dict_capacity_bytes{name="discovery"} 1048576 +apisix_shared_dict_capacity_bytes{name="etcd-cluster-health-check"} 10485760 +... +# HELP apisix_shared_dict_free_space_bytes The free space of each nginx shared DICT since APISIX start +# TYPE apisix_shared_dict_free_space_bytes gauge +apisix_shared_dict_free_space_bytes{name="access-tokens"} 1032192 +apisix_shared_dict_free_space_bytes{name="balancer-ewma"} 10412032 +apisix_shared_dict_free_space_bytes{name="balancer-ewma-last-touched-at"} 10412032 +apisix_shared_dict_free_space_bytes{name="balancer-ewma-locks"} 10412032 +apisix_shared_dict_free_space_bytes{name="discovery"} 1032192 +apisix_shared_dict_free_space_bytes{name="etcd-cluster-health-check"} 10412032 +... ``` ## Disable Plugin -Remove the corresponding json configuration in the plugin configuration to disable `prometheus`. 
-APISIX plugins are hot-reloaded, therefore no need to restart APISIX. +To disable the `prometheus` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, @@ -304,7 +355,7 @@ stream_plugins: Then you need to configure the `prometheus` plugin on the stream route: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "prometheus":{} diff --git a/docs/en/latest/plugins/proxy-cache.md b/docs/en/latest/plugins/proxy-cache.md index 0539c07116bc..0b10f54a8697 100644 --- a/docs/en/latest/plugins/proxy-cache.md +++ b/docs/en/latest/plugins/proxy-cache.md @@ -2,10 +2,9 @@ title: proxy-cache keywords: - APISIX - - Plugin + - API Gateway - Proxy Cache - - proxy-cache -description: This document contains information about the Apache APISIX proxy-cache Plugin. +description: This document contains information about the Apache APISIX proxy-cache Plugin, you can use it to cache the response from the Upstream. --- + +## Description + +The `workflow` plugin is used to introduce [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) to provide complex traffic control features. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------------- | ------------- | -------- | ------- | ------------ | ------------------------------------------------------------ | +| rules.case | array[array] | True | | | List of variables to match for filtering requests for conditional traffic split. It is in the format `{variable operator value}`. For example, `{"arg_name", "==", "json"}`. The variables here are consistent with NGINX internal variables. For details on supported operators, you can refer to [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). | +| rules.actions | array[object] | True | | | The action to be performed when the case matches successfully. Currently, only one element is supported in actions. The first child element of the actions' only element can be `return` or `limit-count`. | + +### `actions` Attributes + +#### return + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------------- | -------- | ------- | ------------ | ---------------------------------------------------------- | +| actions[1].return | string | False | | | Return directly to the client. | +| actions[1].[2].code | integer | False | | | HTTP status code returned to the client. | + +#### limit-count + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------------- | -------- | ------- | ------------ | ---------------------------------------------------------------- | +| actions[1].limit-count | string | False | | | Execute the functions of the `limit-count` plugin. | +| actions[1].[2] | object | False | | | Configuration of `limit-count` plugin, `group` is not supported. | + +:::note + +In `rules`, match `case` in order according to the index of the `rules`, and execute `actions` directly if `case` match. 
+
+:::
+
+## Enabling the Plugin
+
+You can configure the `workflow` plugin on a Route as shown below:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri":"/hello/*",
+    "plugins":{
+        "workflow":{
+            "rules":[
+                {
+                    "case":[
+                        ["uri", "==", "/hello/rejected"]
+                    ],
+                    "actions":[
+                        [
+                            "return",
+                            {"code": 403}
+                        ]
+                    ]
+                },
+                {
+                    "case":[
+                        ["uri", "==", "/hello/v2/appid"]
+                    ],
+                    "actions":[
+                        [
+                            "limit-count",
+                            {
+                                "count":2,
+                                "time_window":60,
+                                "rejected_code":429
+                            }
+                        ]
+                    ]
+                }
+            ]
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    }
+}'
+```
+
+Here, the `workflow` Plugin is enabled on the Route. If the request matches the `case` in the `rules`, the `actions` will be executed.
+
+**Example 1: If the requested uri is `/hello/rejected`, the status code `403` is returned to the client**
+
+```shell
+curl http://127.0.0.1:9080/hello/rejected -i
+HTTP/1.1 403 Forbidden
+...
+
+{"error_msg":"rejected by workflow"}
+```
+
+**Example 2: If the request uri is `/hello/v2/appid`, the `workflow` plugin would execute the `limit-count` plugin**
+
+```shell
+curl http://127.0.0.1:9080/hello/v2/appid -i
+HTTP/1.1 200 OK
+```
+
+```shell
+curl http://127.0.0.1:9080/hello/v2/appid -i
+HTTP/1.1 200 OK
+```
+
+```shell
+curl http://127.0.0.1:9080/hello/v2/appid -i
+HTTP/1.1 429 Too Many Requests
+```
+
+**Example 3: If the request cannot match any `case` in the `rules`, the `workflow` plugin would do nothing**
+
+```shell
+curl http://127.0.0.1:9080/hello/fake -i
+HTTP/1.1 200 OK
+```
+
+## Disable Plugin
+
+To disable the `workflow` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/zipkin.md b/docs/en/latest/plugins/zipkin.md index 0e9909e1d91e..94c13f5e3e79 100644 --- a/docs/en/latest/plugins/zipkin.md +++ b/docs/en/latest/plugins/zipkin.md @@ -111,7 +111,7 @@ func main(){ The example below enables the Plugin on a specific Route: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -178,7 +178,7 @@ docker run -d --name jaeger \ Similar to configuring for Zipkin, create a Route and enable the Plugin: ``` -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -221,7 +221,7 @@ You can access the Jaeger UI to view the traces in endpoint [http://127.0.0.1:16 To disable the `zipkin` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/en/latest/pubsub.md b/docs/en/latest/pubsub.md index d03ba03ac67c..2db871cecb0f 100644 --- a/docs/en/latest/pubsub.md +++ b/docs/en/latest/pubsub.md @@ -115,7 +115,7 @@ The plugins list [config-default.yaml](https://github.com/apache/apisix/blob/mas After this is done, create a route like the one below to connect to this messaging system via APISIX using the WebSocket. ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ${api-key}' \ -H 'Content-Type: application/json' \ -d '{ diff --git a/docs/en/latest/pubsub/kafka.md b/docs/en/latest/pubsub/kafka.md index b25936641d0f..2e0cc98c8887 100644 --- a/docs/en/latest/pubsub/kafka.md +++ b/docs/en/latest/pubsub/kafka.md @@ -74,7 +74,7 @@ Possible response body: When an error occurs, `ErrorResp` will be returned, whic Create a route, set the upstream `scheme` field to `kafka`, and configure `nodes` to be the address of the Kafka broker. ```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ' \ -H 'Content-Type: application/json' \ -d '{ @@ -98,7 +98,7 @@ After configuring the route, you can use this feature. Simply turn on the `kafka-proxy` plugin on the created route and enable the Kafka TLS handshake and SASL authentication through the configuration, which can be found in the [plugin documentation](../../../en/latest/plugins/kafka-proxy.md). 
```shell -curl -X PUT 'http://127.0.0.1:9080/apisix/admin/routes/kafka' \ +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ -H 'X-API-KEY: ' \ -H 'Content-Type: application/json' \ -d '{ diff --git a/docs/en/latest/router-radixtree.md b/docs/en/latest/router-radixtree.md index e2d0de65b6ff..7af4bcf8e1d5 100644 --- a/docs/en/latest/router-radixtree.md +++ b/docs/en/latest/router-radixtree.md @@ -82,7 +82,7 @@ Note: In the matching rules, the `priority` field takes precedence over other ru Create two routes with different `priority` values ​​(the larger the value, the higher the priority). ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -96,7 +96,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -123,7 +123,7 @@ All requests only hit the route of port `1980`. 
Here is an example of setting host matching rules: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -137,7 +137,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -197,7 +197,7 @@ Please take a look at [radixtree-new](https://github.com/api7/lua-resty-radixtre here is an simple example: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "vars": [ @@ -225,7 +225,7 @@ APISIX supports filtering route by POST form attributes with `Content-Type` = `a We can define the following route: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/_post", @@ -273,7 +273,7 @@ query getRepo { We can filter such route out with: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/graphql", diff --git a/docs/en/latest/stand-alone.md b/docs/en/latest/stand-alone.md index 736bd39601a3..63a769533c7a 100644 --- 
a/docs/en/latest/stand-alone.md +++ b/docs/en/latest/stand-alone.md @@ -284,9 +284,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/docs/en/latest/stream-proxy.md b/docs/en/latest/stream-proxy.md index 34797ec98c2a..0ab87b8df4ef 100644 --- a/docs/en/latest/stream-proxy.md +++ b/docs/en/latest/stream-proxy.md @@ -59,7 +59,7 @@ apisix: Here is a mini example: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "remote_addr": "127.0.0.1", "upstream": { @@ -86,7 +86,7 @@ And we can add more options to match a route. Currently stream route configurati Here is an example: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.1", "server_port": 2000, @@ -129,7 +129,7 @@ Let's take another real world example: 3. 
Now we are going to create a stream route with server filtering: ```shell - curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.10", "server_port": 9101, @@ -187,7 +187,7 @@ mTLS is also supported, see [Protect Route](./mtls.md#protect-route) for how to Third, we need to configure a stream route to match and proxy it to the upstream: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -201,7 +201,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 When the connection is TLS over TCP, we can use the SNI to match a route, like: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "sni": "a.test.com", "upstream": { @@ -220,7 +220,7 @@ In this case, a connection handshaked with SNI `a.test.com` will be proxied to ` APISIX also supports proxying to TLS over TCP upstream. 
```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "scheme": "tls", diff --git a/docs/en/latest/terminology/consumer.md b/docs/en/latest/terminology/consumer.md index 331cc6cc9c41..3cd665fbdfc6 100644 --- a/docs/en/latest/terminology/consumer.md +++ b/docs/en/latest/terminology/consumer.md @@ -60,7 +60,7 @@ The example below shows how you can enable a Plugin for a specific Consumer. ```shell # Create a Consumer, specify the authentication plugin key-auth, and enable the specific plugin limit-count -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -77,7 +77,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335 }' # Create a Router, set routing rules and enable plugin configuration -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -109,7 +109,7 @@ We can use the [consumer-restriction](../plugins/consumer-restriction.md) Plugin ```shell # Add Jack to the blacklist -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {}, diff --git a/docs/en/latest/architecture-design/plugin-config.md b/docs/en/latest/terminology/plugin-config.md similarity index 84% rename from docs/en/latest/architecture-design/plugin-config.md 
rename to docs/en/latest/terminology/plugin-config.md index 075562f414bc..a544312adc3a 100644 --- a/docs/en/latest/architecture-design/plugin-config.md +++ b/docs/en/latest/terminology/plugin-config.md @@ -1,5 +1,10 @@ --- title: Plugin Config +keywords: + - API gateway + - Apache APISIX + - Plugin Config +description: Plugin Config in Apache APISIX. --- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +如果你希望为 APISIX 做出贡献或配置开发环境,你可以参考本教程。 + +如果你想通过其他方式安装 APISIX,你可以参考[安装指南](./installation-guide.md)。 + +:::note + +如果你想为特定的环境或打包 APISIX,请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools)。 + +::: + +## 源码安装 APISIX + +首先,你可以通过以下命令安装依赖项: + +```shell +curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash - +``` + +然后,创建一个目录并设置环境变量 `APISIX_VERSION`: + +```shell +APISIX_VERSION='2.15.0' +mkdir apisix-${APISIX_VERSION} +``` + +现在,你可以运行以下命令来下载 APISIX 源码包: + +```shell +wget https://downloads.apache.org/apisix/${APISIX_VERSION}/apache-apisix-${APISIX_VERSION}-src.tgz +``` + +你可以从[下载页面](https://apisix.apache.org/downloads/)下载源码包。你也可以在该页面找到 APISIX Dashboard 和 APISIX Ingress Controller 的源码包。 + +下载源码包后,你可以将文件解压到之前创建的文件夹中: + +```shell +tar zxvf apache-apisix-${APISIX_VERSION}-src.tgz -C apisix-${APISIX_VERSION} +``` + +然后切换到解压的目录,创建依赖项并安装 APISIX,如下所示: + +```shell +cd apisix-${APISIX_VERSION} +make deps +make install +``` + +该命令将安装 APISIX 运行时依赖的 Lua 库和 `apisix` 命令。 + +:::note + +如果你在运行 `make deps` 时收到类似 `Could not find header file for LDAP/PCRE/openssl` 的错误消息,请使用此解决方案。 + +`luarocks` 支持自定义编译时依赖项(请参考:[配置文件格式](https://github.com/luarocks/luarocks/wiki/Config-file-format))。你可以使用第三方工具安装缺少的软件包并将其安装目录添加到 `luarocks` 变量表中。此方法适用于 macOS、Ubuntu、CentOS 和其他类似操作系统。 + +此处仅给出 macOS 的具体解决步骤,其他操作系统的解决方案类似: + +1. 安装 `openldap`: + + ```shell + brew install openldap + ``` + +2. 使用以下命令命令找到本地安装目录: + + ```shell + brew --prefix openldap + ``` + +3. 将路径添加到项目配置文件中(选择两种方法中的一种即可): + 1. 
你可以使用 `luarocks config` 命令设置 `LDAP_DIR`: + + ```shell + luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1 + ``` + + 2. 你还可以更改 `luarocks` 的默认配置文件。打开 `~/.luaorcks/config-5.1.lua` 文件并添加以下内容: + + ```shell + variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", } + ``` + + `/opt/homebrew/cellar/openldap/` 是 `brew` 在 macOS(Apple Silicon) 上安装 `openldap` 的默认位置。`/usr/local/opt/openldap/` 是 brew 在 macOS(Intel) 上安装 openldap 的默认位置。 + +::: + +如果你不再需要 APISIX,可以执行以下命令卸载: + +```shell +make uninstall && make undeps +``` + +:::danger + +该操作将删除所有相关文件。 + +::: + +## 安装 etcd + +APISIX 默认使用 [etcd](https://github.com/etcd-io/etcd) 来保存和同步配置。在运行 APISIX 之前,你需要在你的机器上安装 etcd。 + + + + +```shell +ETCD_VERSION='3.4.18' +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz +tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ + cd etcd-v${ETCD_VERSION}-linux-amd64 && \ + sudo cp -a etcd etcdctl /usr/bin/ +nohup etcd >/tmp/etcd.log 2>&1 & +``` + + + + + +```shell +brew install etcd +brew services start etcd +``` + + + + +## 管理 APISIX 服务 + +运行以下命令初始化 NGINX 配置文件和 etcd。 + +```shell +apisix init +``` + +:::tip + +你可以运行 `apisix help` 命令,查看返回结果,获取其他操作命令及其描述。 + +::: + +运行以下命令测试配置文件,APISIX 将根据 `config.yaml` 生成 `nginx.conf`,并检查 `nginx.conf` 的语法是否正确。 + +```shell +apisix test +``` + +最后,你可以使用以下命令运行 APISIX。 + +```shell +apisix start +``` + +如果需要停止 APISIX,你可以使用 `apisix quit` 或者 `apisix stop` 命令。 + +`apisix quit` 将正常关闭 APISIX,该指令确保在停止之前完成所有收到的请求。 + +```shell +apisix quit +``` + +`apisix stop` 命令会强制关闭 APISIX 并丢弃所有请求。 + +```shell +apisix stop +``` + +## 为 APISIX 构建 APISIX-Base + +APISIX 的一些特性需要在 OpenResty 中引入额外的 NGINX 模块。 + +如果要使用这些功能,你需要构建一个自定义的 OpenResty 发行版(APISIX-Base)。请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools) 配置你的构建环境并进行构建。 + +## 运行测试用例 + +以下步骤展示了如何运行 APISIX 的测试用例: + +1. 
安装 `perl` 的包管理器 [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION)。 +2. 通过 `cpanm` 来安装 [test-nginx](https://github.com/openresty/test-nginx) 的依赖: + + ```shell + sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + ``` + +3. 将 `test-nginx` 源码克隆到本地: + + ```shell + git clone https://github.com/openresty/test-nginx.git + ``` + +4. 运行以下命令将当前目录添加到 Perl 的模块目录: + + ```shell + export PERL5LIB=.:$PERL5LIB + ``` + + 你可以通过运行以下命令指定 NGINX 二进制路径: + + ```shell + TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t + ``` + +5. 运行测试: + + ```shell + make test + ``` + +:::note + +部分测试需要依赖外部服务和修改系统配置。如果想要完整地构建测试环境,请参考 [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh)。 + +::: + +### 故障排查 + +以下是运行 APISIX 测试用例的常见故障排除步骤。 + +出现 `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf` 报错,是因为默认的 NGINX 安装路径未找到,解决方法如下: + +- Linux 默认安装路径: + + ```shell + export PATH=/usr/local/openresty/nginx/sbin:$PATH + ``` + +- macOS 通过 `homebrew` 的默认安装路径: + + ```shell + export PATH=/usr/local/opt/openresty/nginx/sbin:$PATH + ``` + +### 运行指定的测试用例 + +使用以下命令运行指定的测试用例: + +```shell +prove -Itest-nginx/lib -r t/plugin/openid-connect.t +``` + +如果你想要了解更多信息,请参考 [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md)。 diff --git a/docs/zh/latest/certificate.md b/docs/zh/latest/certificate.md index e6c433e2fc9c..06f7933c2d84 100644 --- a/docs/zh/latest/certificate.md +++ b/docs/zh/latest/certificate.md @@ -52,7 +52,7 @@ with open(sys.argv[2]) as f: key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], @@ -68,7 +68,7 @@ print(resp.text) ./ssl.py t.crt t.key test.com # 创建 Router 对象 -curl 
http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["test.com"], @@ -113,7 +113,7 @@ curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -vvv ```shell ./ssl.py t.crt t.key '*.test.com' -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", "hosts": ["*.test.com"], @@ -168,3 +168,152 @@ curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/hello -v * `keys`:PEM 格式的 SSL 证书私钥列表 `APISIX` 会将相同下标的证书和私钥配对使用,因此 `certs` 和 `keys` 列表的长度必须一致。 + +### 设置多个 CA 证书 + +APISIX 目前支持在多处设置 CA 证书,比如 [保护 Admin API](./mtls.md#保护-admin-api),[保护 ETCD](./mtls.md#保护-etcd),以及 [部署模式](../../en/latest/architecture-design/deployment-role.md) 等。 + +在这些地方,使用 `ssl_trusted_certificate` 或 `trusted_ca_cert` 来配置 CA 证书,但是这些配置最终将转化为 OpenResty 的 [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) 指令。 + +如果你需要在不同的地方指定不同的 CA 证书,你可以将这些 CA 证书制作成一个 CA bundle 文件,在需要用到 CA 证书的地方将配置指向这个文件。这样可以避免生成的 `lua_ssl_trusted_certificate` 存在多处并且互相覆盖的问题。 + +下面用一个完整的例子来展示如何在 APISIX 设置多个 CA 证书。 + +假设让 client 与 APISIX Admin API,APISIX 与 ETCD 之间都使用 mTLS 协议进行通信,目前有两张 CA 证书,分别是 `foo_ca.crt` 和 `bar_ca.crt`,用这两张 CA 证书各自签发 client 与 server 证书对,`foo_ca.crt` 及其签发的证书对用于保护 Admin API,`bar_ca.crt` 及其签发的证书对用于保护 ETCD。 + +下表详细列出这个示例所涉及到的配置及其作用: + +| 配置 | 类型 | 用途 | +| ------------- | ------- | ----------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA 证书 | 签发客户端与 APISIX Admin API 进行 mTLS 通信所需的次级证书。 | +| foo_client.crt | 证书 | 由 `foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 时证明自身身份的证书。 | +| foo_client.key | 密钥文件 | 由 
`foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 所需的密钥文件。 | +| foo_server.crt | 证书 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `apisix.admin_api_mtls.admin_ssl_cert` 配置项。 | +| foo_server.key | 密钥文件 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `apisix.admin_api_mtls.admin_ssl_cert_key` 配置项。 | +| admin.apisix.dev | 域名 | 签发 `foo_server.crt` 证书时使用的 Common Name,客户端通过该域名访问 APISIX Admin API | +| bar_ca.crt | CA 证书 | 签发 APISIX 与 ETCD 进行 mTLS 通信所需的次级证书。 | +| bar_etcd.crt | 证书 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--cert-file` 选项。 | +| bar_etcd.key | 密钥文件 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--key-file` 选项。 | +| bar_apisix.crt | 证书 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.cert` 配置项。 | +| bar_apisix.key | 密钥文件 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.key` 配置项。 | +| etcd.cluster.dev | 域名 | 签发 `bar_etcd.crt` 证书时使用的 Common Name,APISIX 与 ETCD 进行 mTLS 通信时,使用该域名作为 SNI。对应 `etcd.tls.sni` 配置项。| +| apisix.ca-bundle | CA bundle | 由 `foo_ca.crt` 与 `bar_ca.crt` 合并而成,替代 `foo_ca.crt` 与 `bar_ca.crt`。 | + +1. 制作 CA bundle 文件 + +``` +cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle +``` + +2. 
启动 ETCD 集群,并开启客户端验证 + +先编写 `goreman` 配置,命名为 `Procfile-single-enable-mtls`,内容如下: + +```text +# 运行 `go get github.com/mattn/goreman` 安装 goreman,用 goreman 执行以下命令: +etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd3: etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +``` + +使用 `goreman` 来启动 ETCD 集群: + +```shell +goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 & +``` + +3. 
更新 `config.yaml` + +```yaml +apisix: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin + admin_listen: + ip: 127.0.0.1 + port: 9180 + https_admin: true + + admin_api_mtls: + admin_ssl_ca_cert: /path/to/apisix.ca-bundle + admin_ssl_cert: /path/to/foo_server.crt + admin_ssl_cert_key: /path/to/foo_server.key + + ssl: + ssl_trusted_certificate: /path/to/apisix.ca-bundle + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + - "https://127.0.0.1:22379" + - "https://127.0.0.1:32379" + tls: + cert: /path/to/bar_apisix.crt + key: /path/to/bar_apisix.key + sni: etcd.cluster.dev +``` + +4. 测试 Admin API + +启动 APISIX,如果 APISIX 启动成功,`logs/error.log` 中没有异常输出,表示 APISIX 与 ETCD 之间进行 mTLS 通信正常。 + +用 curl 模拟客户端,与 APISIX Admin API 进行 mTLS 通信,并创建一条路由: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果输出以下 SSL 握手过程,表示 curl 与 APISIX Admin API 之间 mTLS 通信成功: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. 
验证 APISIX 代理 + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +…… +``` + +APISIX 将请求代理到了上游 `httpbin.org` 的 `/get` 路径,并返回了 `HTTP/1.1 200 OK`。整个过程使用 CA bundle 替代 CA 证书是正常可用的。 diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json index 940f16015df1..5d68a50eed11 100644 --- a/docs/zh/latest/config.json +++ b/docs/zh/latest/config.json @@ -1,5 +1,5 @@ { - "version": "2.14.1", + "version": "2.15.0", "sidebar": [ { "type": "category", @@ -48,7 +48,8 @@ "plugins/real-ip", "plugins/server-info", "plugins/ext-plugin-post-req", - "plugins/ext-plugin-pre-req" + "plugins/ext-plugin-pre-req", + "plugins/ext-plugin-post-resp" ] }, { @@ -91,7 +92,8 @@ "plugins/ua-restriction", "plugins/referer-restriction", "plugins/consumer-restriction", - "plugins/csrf" + "plugins/csrf", + "plugins/public-api" ] }, { @@ -150,7 +152,9 @@ "plugins/sls-logger", "plugins/google-cloud-logging", "plugins/splunk-hec-logging", - "plugins/file-logger" + "plugins/file-logger", + "plugins/loggly", + "plugins/elasticsearch-logger" ] } ] @@ -162,7 +166,9 @@ "plugins/serverless", "plugins/azure-functions", "plugins/openwhisk", - "plugins/aws-lambda" + "plugins/aws-lambda", + "plugins/workflow", + "plugins/openfunction" ] }, { @@ -189,6 +195,16 @@ } ] }, + { + "type": "category", + "label": "Development", + "items": [ + { + "type": "doc", + "id": "building-apisix" + } + ] + }, { "type": "doc", "id": "FAQ" @@ -205,7 +221,7 @@ "discovery/dns", "discovery/nacos", "discovery/eureka", - "discovery/zookeeper", + "discovery/control-plane-service-discovery", "discovery/kubernetes" ] }, diff --git a/docs/zh/latest/control-api.md b/docs/zh/latest/control-api.md index 89792c430fec..541034b0c395 100644 --- a/docs/zh/latest/control-api.md +++ b/docs/zh/latest/control-api.md @@ -208,3 
+208,40 @@ APISIX 中一些插件添加了自己的 control API。如果你对他们感兴 在 http 子系统中触发一次全量 GC 注意,当你启用 stream proxy 时,APISIX 将为 stream 子系统运行另一个 Lua 虚拟机。它不会触发这个 Lua 虚拟机中的全量 GC。 + +### GET /v1/plugin_metadatas + +引入自 3.0.0 版本 + +打印所有插件的元数据: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +引入自 3.0.0 版本 + +打印指定插件的元数据: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` diff --git a/docs/zh/latest/debug-function.md b/docs/zh/latest/debug-function.md index 5ed2dc74993f..4f2f164dd039 100644 --- a/docs/zh/latest/debug-function.md +++ b/docs/zh/latest/debug-function.md @@ -34,7 +34,7 @@ title: 调试功能 示例 1:`502` 响应状态码来源于 `Upstream` (IP 地址不可用) ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "upstream": { @@ -75,7 +75,7 @@ $ curl http://127.0.0.1:9080/hello -v 示例 2:`502` 响应状态码来源于 `APISIX` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "fault-injection": { @@ -109,7 +109,7 @@ Fault Injection! 
示例 3:`Upstream` 具有多节点,并且所有节点不可用 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "nodes": { "127.0.0.3:1": 1, @@ -122,7 +122,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034 ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream_id": "1" diff --git a/docs/zh/latest/discovery.md b/docs/zh/latest/discovery.md index 87687caf5674..189945a0d1d7 100644 --- a/docs/zh/latest/discovery.md +++ b/docs/zh/latest/discovery.md @@ -49,7 +49,7 @@ APISIX 要扩展注册中心其实是件非常容易的事情,其基本步骤 首先,在 `apisix/discovery` 下创建 `eureka` 目录; -其次,在 `apisix/discovery/eureka` 目录中添加 [`init.lua`](../../../apisix/discovery/eureka/init.lua); +其次,在 `apisix/discovery/eureka` 目录中添加 [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua); 然后在 `init.lua` 实现用于初始化的 `init_worker` 函数以及用于获取服务实例节点列表的 `nodes` 函数即可: @@ -189,7 +189,7 @@ discovery: APISIX 是通过 `upstream.discovery_type` 选择使用的服务发现,`upstream.service_name` 与注册中心的服务名进行关联。下面是将 URL 为 "/user/\*" 的请求路由到注册中心名为 "USER-SERVICE" 的服务上例子: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/user/*", "upstream": { @@ -206,13 +206,13 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": 
"eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` 因为上游的接口 URL 可能会有冲突,通常会在网关通过前缀来进行区分: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/a/*", "plugins": { @@ -227,7 +227,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f } }' -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/b/*", "plugins": { diff --git a/docs/zh/latest/discovery/control-plane-service-discovery.md b/docs/zh/latest/discovery/control-plane-service-discovery.md new file mode 100644 index 000000000000..b6bcb7450901 --- /dev/null +++ b/docs/zh/latest/discovery/control-plane-service-discovery.md @@ -0,0 +1,72 @@ +--- +title: 控制面服务发现 +keywords: + - API 网关 + - APISIX + - ZooKeeper + - Nacos + - APISIX-Seed +description: 本文档介绍了如何在 API 网关 Apache APISIX 控制面通过 Nacos 和 Zookeeper 实现服务发现。 +--- + + + +本文档介绍了如何在 APISIX 控制面通过 Nacos 和 Zookeeper 实现服务发现。 + +## APISIX-Seed 架构 + +Apache APISIX 在早期已经支持了数据面服务发现,现在 APISIX 也通过 [APISIX-Seed](https://github.com/api7/apisix-seed) 项目实现了控制面服务发现,下图为 APISIX-Seed 架构图。 + +![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png) + +图中的数字代表的具体信息如下: + +1. 通过 Admin API 向 APISIX 注册上游并指定服务发现类型。APISIX-Seed 将监听 etcd 中的 APISIX 资源变化,过滤服务发现类型并获取服务名称(如 ZooKeeper); +2. APISIX-Seed 将在服务注册中心(如 ZooKeeper)订阅指定的服务名称,以监控和更新对应的服务信息; +3. 
客户端向服务注册中心注册服务后,APISIX-Seed 会获取新的服务信息,并将更新后的服务节点写入 etcd; +4. 当 APISIX-Seed 在 etcd 中更新相应的服务节点信息时,APISIX 会将最新的服务节点信息同步到内存中。 + +:::note + +引入 APISIX-Seed 后,如果注册中心的服务变化频繁,etcd 中的数据也会频繁变化。因此,需要在启动 etcd 时设置 `--auto-compaction` 选项,用来定期压缩历史记录,避免耗尽 etcd 存储空间。详细信息请参考 [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions)。 + +::: + +## 为什么需要 APISIX-Seed? + +- 网络拓扑变得更简单 + + APISIX 不需要与每个注册中心保持网络连接,只需要关注 etcd 中的配置信息即可。这将大大简化网络拓扑。 + +- 上游服务总数据量变小 + + 由于 `registry` 的特性,APISIX 可能会在 Worker 中存储全量的 `registry` 服务数据,例如 Consul_KV。通过引入 APISIX-Seed,APISIX 的每个进程将不需要额外缓存上游服务相关信息。 + +- 更容易管理 + + 服务发现配置需要为每个 APISIX 实例配置一次。通过引入 APISIX-Seed,APISIX 将对服务注册中心的配置变化无感知。 + +## 支持的服务发现类型 + +目前已经支持了 ZooKeeper 和 Nacos,后续还将支持更多的服务注册中心,更多信息请参考:[APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix)。 + +- 如果你想启用控制面 ZooKeeper 服务发现,请参考:[ZooKeeper 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md)。 + +- 如果你想启用控制面 Nacos 服务发现,请参考:[Nacos 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/nacos.md)。 diff --git a/docs/zh/latest/discovery/kubernetes.md b/docs/zh/latest/discovery/kubernetes.md index c08672cce90a..6e0f449f2046 100644 --- a/docs/zh/latest/discovery/kubernetes.md +++ b/docs/zh/latest/discovery/kubernetes.md @@ -52,6 +52,8 @@ discovery: # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + default_weight: 50 # weight assigned to each discovered endpoint.
default 50, minimum 0 + # kubernetes discovery plugin support use namespace_selector # you can use one of [equal, not_equal, match, not_match] filter namespace namespace_selector: diff --git a/docs/zh/latest/discovery/nacos.md b/docs/zh/latest/discovery/nacos.md index 3d0e927ee82b..c0d7261e3a8b 100644 --- a/docs/zh/latest/discovery/nacos.md +++ b/docs/zh/latest/discovery/nacos.md @@ -60,7 +60,7 @@ discovery: 该服务在 Nacos 中的服务名是 APISIX-NACOS ,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS ,创建路由时指定服务发现类型为 nacos 。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacos/*", "upstream": { @@ -93,8 +93,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f "priority": 0, "uri": "\/nacos\/*" } - }, - "action": "set" + } } ``` @@ -111,7 +110,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f 该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns ,创建路由时指定服务发现类型为 nacos 。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithNamespaceId/*", "upstream": { @@ -150,8 +149,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f "priority": 0, "uri": "\/nacosWithNamespaceId\/*" } - }, - "action": "set" + } } ``` @@ -161,7 +159,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f 该服务在 Nacos 中的服务名是 APISIX-NACOS,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group 
,创建路由时指定服务发现类型为 nacos 。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithGroupName/*", "upstream": { @@ -200,8 +198,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f "priority": 0, "uri": "\/nacosWithGroupName\/*" } - }, - "action": "set" + } } ``` @@ -211,7 +208,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f 该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group ,创建路由时指定服务发现类型为 nacos 。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/nacosWithNamespaceIdAndGroupName/*", "upstream": { @@ -252,7 +249,6 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f "priority": 0, "uri": "\/nacosWithNamespaceIdAndGroupName\/*" } - }, - "action": "set" + } } ``` diff --git a/docs/zh/latest/discovery/zookeeper.md b/docs/zh/latest/discovery/zookeeper.md deleted file mode 100644 index db2bec30103c..000000000000 --- a/docs/zh/latest/discovery/zookeeper.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: zookeeper -keywords: - - APISIX - - ZooKeeper - - apisix-seed -description: 本篇文档介绍了如何使用 ZooKeeper 做服务发现 ---- - - - -目前,如果你想在 APISIX 控制面使用 ZooKeeper 实现服务发现功能,需要依赖 [apisix-seed](https://github.com/api7/apisix-seed) 项目。 - -## `apisix-seed` 工作原理 - -![APISIX-SEED](../../../assets/images/apisix-seed.svg) - -`apisix-seed` 通过同时监听 etcd 和 ZooKeeper 的变化来完成数据交换。 - -流程如下: - -1. 使用 APISIX 注册一个上游服务,并将服务类型设置为 `zookeeper` 并保存到 etcd; -2. 
`apisix-seed` 监听 etcd 中 APISIX 的资源变更,并过滤服务发现类型获得服务名称; -3. `apisix-seed` 将服务绑定到 etcd 资源,并开始在 ZooKeeper 中监控此服务; -4. 客户端向 ZooKeeper 注册该服务; -5. `apisix-seed` 获取 ZooKeeper 中的服务变更; -6. `apisix-seed` 通过服务名称查询绑定的 etcd 资源,并将更新后的服务节点写入 etcd; -7. APISIX Worker 监控 etcd 资源变更,并在内存中刷新服务节点信息。 - -## 如何使用 - -### 环境准备:配置 `apisix-seed` 和 ZooKeeper - -1. 启动 ZooKeeper - -```bash -docker run -itd --rm --name=dev-zookeeper -p 2181:2181 zookeeper:3.7.0 -``` - -2. 下载并编译 `apisix-seed` 项目 - -```bash -git clone https://github.com/api7/apisix-seed.git -cd apisix-seed -go build -``` - -3. 参考以下信息修改 `apisix-seed` 配置文件,路径为 `conf/conf.yaml` - -```bash -etcd: # APISIX etcd 配置 - host: - - "http://127.0.0.1:2379" - prefix: /apisix - timeout: 30 - -discovery: - zookeeper: # 配置 ZooKeeper 进行服务发现 - hosts: - - "127.0.0.1:2181" # ZooKeeper 服务器地址 - prefix: /zookeeper - weight: 100 # ZooKeeper 节点默认权重设为 100 - timeout: 10 # ZooKeeper 会话超时时间默认设为 10 秒 -``` - -4. 启动 `apisix-seed` 以监听服务变更 - -```bash -./apisix-seed -``` - -### 设置 APISIX 路由和上游 - -通过以下命令设置路由,请求路径设置为 `/zk/*`,上游使用 ZooKeeper 作为服务发现,服务名称为 `APISIX-ZK`。 - -```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ --H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' -{ - "uri": "/zk/*", - "upstream": { - "service_name": "APISIX-ZK", - "type": "roundrobin", - "discovery_type": "zookeeper" - } -}' -``` - -### 注册服务 - -使用 ZooKeeper-cli 注册服务 - -登录 ZooKeeper 容器,使用 CLI 程序进行服务注册。具体命令如下: - -```bash -# 登陆容器 -docker exec -it ${CONTAINERID} /bin/bash -# 登陆 ZooKeeper 客户端 -oot@ae2f093337c1:/apache-zookeeper-3.7.0-bin# ./bin/zkCli.sh -# 注册服务 -[zk: localhost:2181(CONNECTED) 0] create /zookeeper/APISIX-ZK '{"host":"127.0.0.1","port":1980,"weight":100}' -``` - -返回结果如下: - -```bash -Created /zookeeper/APISIX-ZK -``` - -### 请求验证 - -通过以下命令请求路由: - -```bash -curl -i http://127.0.0.1:9080/zk/hello -``` - -正常返回结果: - -```bash -HTTP/1.1 200 OK -Connection: keep-alive -... 
-hello -``` diff --git a/docs/zh/latest/external-plugin.md b/docs/zh/latest/external-plugin.md index 3e8049f8631d..07b8fbaa35ab 100644 --- a/docs/zh/latest/external-plugin.md +++ b/docs/zh/latest/external-plugin.md @@ -32,6 +32,7 @@ APISIX 支持使用 Lua 语言编写插件,这种类型的插件在 APISIX 内 ![external-plugin](../../assets/images/external-plugin.png) 当你在 APISIX 中配置了一个 Plugin Runner ,APISIX 将以子进程的方式运行该 Plugin Runner 。 + 该子进程与 APISIX 进程从属相同用户。当重启或者重新加载 APISIX 时,该 Plugin Runner 也将被重启。 一旦你为指定路由配置了 `ext-plugin-*` 插件, diff --git a/docs/zh/latest/getting-started.md b/docs/zh/latest/getting-started.md index b40736269ee2..dcad6d12286f 100644 --- a/docs/zh/latest/getting-started.md +++ b/docs/zh/latest/getting-started.md @@ -154,7 +154,7 @@ docker-compose -p docker-apisix -f docker-compose-arm64.yml up -d :::info IMPORTANT -请确保其他系统进程没有占用 **9080、9443 和 2379** 端口。 +请确保其他系统进程没有占用 **9080、9180、9443 和 2379** 端口。 在基于 UNIX 的系统中,可以使用以下命令来终止指定监听端口的运行: @@ -174,7 +174,7 @@ docker logs -f --tail $ ```bash # 注意:请在运行 Docker 的宿主机上执行 curl 命令。 -curl "http://127.0.0.1:9080/apisix/admin/services/" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +curl "http://127.0.0.1:9180/apisix/admin/services/" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ``` 如果返回数据如下所示,则表示 APISIX 成功启动: @@ -182,7 +182,6 @@ curl "http://127.0.0.1:9080/apisix/admin/services/" -H 'X-API-KEY: edd1c9f034335 ```json { "count":0, - "action":"get", "node":{ "key":"/apisix/services", "nodes":[], @@ -200,7 +199,7 @@ APISIX 提供了强大的 [Admin API](./admin-api.md) 和 [Dashboard](https://gi 以下示例代码中,我们将为路由配置匹配规则,以便 APISIX 可以将请求转发到对应的上游服务: ```bash -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "methods": ["GET"], "host": "example.com", @@ -233,7 +232,7 @@ curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com 你可以通过以下命令创建一个上游,并在路由中使用它,而不是直接将其配置在路由中: ```bash 
-curl "http://127.0.0.1:9080/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "type": "roundrobin", "nodes": { @@ -247,7 +246,7 @@ curl "http://127.0.0.1:9080/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f0343 上游服务创建完成后,可以通过以下命令绑定到指定路由: ```bash -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "uri": "/get", "host": "httpbin.org", diff --git a/docs/zh/latest/grpc-proxy.md b/docs/zh/latest/grpc-proxy.md index 33dab7b76f98..90339f3e5fd2 100644 --- a/docs/zh/latest/grpc-proxy.md +++ b/docs/zh/latest/grpc-proxy.md @@ -40,7 +40,7 @@ title: gRPC 代理 * 下面例子所代理的 gRPC 服务可供参考:[grpc_server_example](https://github.com/api7/grpc_server_example)。 ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["POST", "GET"], "uri": "/helloworld.Greeter/SayHello", @@ -100,7 +100,7 @@ grpcurl -plaintext -import-path /pathtoprotos -proto helloworld.proto \ 如果你的 gRPC 服务使用了自己的 TLS 加密,即所谓的 `gPRCS` (gRPC + TLS),那么需要修改 scheme 为 `grpcs`。继续上面的例子,50052 端口上跑的是 gPRCS 的服务,这时候应该这么配置: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["POST", "GET"], "uri": "/helloworld.Greeter/SayHello", diff --git a/docs/zh/latest/health-check.md b/docs/zh/latest/health-check.md index 3cd1e7789615..b165ad561368 100644 --- a/docs/zh/latest/health-check.md +++ b/docs/zh/latest/health-check.md @@ -62,7 +62,7 @@ 
Apache APISIX 的健康检查使用 [lua-resty-healthcheck](https://github.com/a ### 配置示例: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { diff --git a/docs/zh/latest/install-dependencies.md b/docs/zh/latest/install-dependencies.md index 94cd5ab53052..3070d5401351 100644 --- a/docs/zh/latest/install-dependencies.md +++ b/docs/zh/latest/install-dependencies.md @@ -31,8 +31,6 @@ title: 安装依赖 - 在某些平台上,通过包管理器安装 LuaRocks 会导致 Lua 被升级为 Lua 5.3,所以我们建议通过源代码的方式安装 LuaRocks。如果你通过官方仓库安装 OpenResty 和 OpenResty 的 OpenSSL 开发库(rpm 版本:openresty-openssl111-devel,deb 版本:openresty-openssl111-dev),那么 [我们提供了自动安装的脚本](https://github.com/apache/apisix/tree/master/utils/linux-install-luarocks.sh)。如果你是自己编译的 OpenResty,可以参考上述脚本并修改里面的路径。如果编译时没有指定 OpenSSL 库的路径,那么无需配置 LuaRocks 内跟 OpenSSL 相关的变量,因为默认都是用的系统自带的 OpenSSL。如果编译时指定了 OpenSSL 库,那么需要保证 LuaRocks 的 OpenSSL 配置跟 OpenResty 的相一致。 -- 警告:如果你正在使用低于 `1.17.8` 的 OpenResty 版本,请安装 openresty-openssl-devel,而不是 openresty-openssl111-devel。 - - OpenResty 是 APISIX 的一个依赖项,如果是第一次部署 APISIX 并且不需要使用 OpenResty 部署其他服务,可以在 OpenResty 安装完成后停止并禁用 OpenResty,这不会影响 APISIX 的正常工作,请根据自己的业务谨慎操作。例如 Ubuntu:`systemctl stop openresty && systemctl disable openresty`。 ## 安装 diff --git a/docs/zh/latest/installation-guide.md b/docs/zh/latest/installation-guide.md index 8fb3d7f80fc8..5b0ea80e3aed 100644 --- a/docs/zh/latest/installation-guide.md +++ b/docs/zh/latest/installation-guide.md @@ -44,6 +44,7 @@ import TabItem from '@theme/TabItem'; {label: 'Docker', value: 'docker'}, {label: 'Helm', value: 'helm'}, {label: 'RPM', value: 'rpm'}, + {label: 'Source Code', value: 'source code'}, ]}> @@ -169,6 +170,12 @@ apisix start ::: + + + + +如果你想要使用源码构建 APISIX,请参考[源码安装 APISIX](./building-apisix.md)。 + @@ -188,7 +195,7 @@ APISIX 使用 [etcd](https://github.com/etcd-io/etcd) 作为配置中心进行 ```shell 
-ETCD_VERSION='3.4.18' +ETCD_VERSION='3.5.4' wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ cd etcd-v${ETCD_VERSION}-linux-amd64 && \ @@ -210,13 +217,50 @@ brew services start etcd ## 后续操作 +### 配置 APISIX + +通过修改本地 `./conf/config.yaml` 文件,或者在启动 APISIX 时使用 `-c` 或 `--config` 添加文件路径参数 `apisix start -c `,完成对 APISIX 服务本身的基本配置。 + +比如将 APISIX 默认监听端口修改为 8000,其他配置保持默认,在 `./conf/config.yaml` 中只需这样配置: + +```yaml title=“./conf/config.yaml” +apisix: + node_listen: 8000 # APISIX listening port +``` + +比如指定 APISIX 默认监听端口为 8000,并且设置 etcd 地址为 `http://foo:2379`,其他配置保持默认。在 `./conf/config.yaml` 中只需这样配置: + +```yaml title=“./conf/config.yaml” +apisix: + node_listen: 8000 # APISIX listening port + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://foo:2379" +``` + +:::warning + +APISIX 的默认配置可以在 `./conf/config-default.yaml` 文件中看到,该文件与 APISIX 源码强绑定,请不要手动修改 `./conf/config-default.yaml` 文件。如果需要自定义任何配置,都应在 `./conf/config.yaml` 文件中完成。 +::: + +:::warning + +请不要手动修改 APISIX 安装目录下的 `./conf/nginx.conf` 文件。当 APISIX 启动时,会根据 `config.yaml` 的配置自动生成新的 `nginx.conf` 并自动启动服务。 + +::: + ### 更新 Admin API key 建议修改 Admin API 的 key,保护 APISIX 的安全。 请参考如下信息更新配置文件: -```yaml title="conf/config.yaml" +```yaml title="./conf/config.yaml" apisix: admin_key - @@ -228,7 +272,7 @@ apisix: 更新完成后,你可以使用新的 key 访问 Admin API: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes?api_key=newsupersecurekey -i +curl http://127.0.0.1:9180/apisix/admin/routes?api_key=newsupersecurekey -i ``` ### 为 APISIX 添加 systemd 配置文件 diff --git a/docs/zh/latest/mtls.md b/docs/zh/latest/mtls.md index 07ab50e3183f..414adb87684f 100644 --- a/docs/zh/latest/mtls.md +++ b/docs/zh/latest/mtls.md @@ -36,7 +36,9 @@ title: TLS 双向认证 2. 
修改 `conf/config.yaml` 中的配置项: ```yaml - port_admin: 9180 + admin_listen: + ip: 127.0.0.1 + port: 9180 https_admin: true admin_api_mtls: @@ -69,10 +71,14 @@ curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert / 你需要构建 [APISIX-Base](./FAQ.md#如何构建-APISIX-Base-环境?),并且需要在配置文件中设定 `etcd.tls` 来使 ETCD 的双向认证功能正常工作。 ```yaml -etcd: - tls: - cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client - key: /data/certs/etcd_client.key # path of key used by the etcd client +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + tls: + cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client + key: /data/certs/etcd_client.key # path of key used by the etcd client ``` 如果 APISIX 不信任 etcd server 使用的 CA 证书,我们需要设置 CA 证书。 @@ -126,7 +132,7 @@ if len(sys.argv) >= 5: reqParam["client"]["ca"] = clientCert if len(sys.argv) >= 6: reqParam["client"]["depth"] = int(sys.argv[5]) -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json=reqParam, headers={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json=reqParam, headers={ "X-API-KEY": api_key, }) print(resp.status_code) @@ -154,7 +160,7 @@ curl --resolve 'mtls.test.com::' "https:// 关于熔断超时逻辑 +:::note 注意 -由代码逻辑自动按**触发不健康状态**的次数递增运算: +关于熔断超时逻辑,由代码逻辑自动按**触发不健康状态**的次数递增运算: -每当上游服务返回 `unhealthy.http_statuses` 配置中的状态码(比如:500),达到 `unhealthy.failures` 次时 (比如:3 次),认为上游服务处于不健康状态。 +当上游服务返回 `unhealthy.http_statuses` 配置中的状态码(默认为 `500`),并达到 `unhealthy.failures` 预设次数时(默认为 3 次),则认为上游服务处于不健康状态。 -第一次触发不健康状态,**熔断 2 秒**。 +第一次触发不健康状态时,熔断 2 秒。超过熔断时间后,将重新开始转发请求到上游服务,如果继续返回 `unhealthy.http_statuses` 状态码,记数再次达到 `unhealthy.failures` 预设次数时,熔断 4 秒。依次类推(2,4,8,16,……),直到达到预设的 `max_breaker_sec`值。 -然后,2 秒过后重新开始转发请求到上游服务,如果继续返回 `unhealthy.http_statuses` 状态码,记数再次达到 `unhealthy.failures` 次时,**熔断 4 秒**(倍数方式)。 +当上游服务处于不健康状态时,如果转发请求到上游服务并返回 `healthy.http_statuses` 配置中的状态码(默认为 `200`),并达到 `healthy.successes` 次时,则认为上游服务恢复至健康状态。 -依次类推,2, 4, 8, 16, 32, 64, 
..., 256,最大到 300。 300 是 `max_breaker_sec` 的最大值,允许自定义修改。 - -在不健康状态时,当转发请求到上游服务并返回 `healthy.http_statuses` 配置中的状态码(比如:200),达到 `healthy.successes` 次时 (比如:3 次),认为上游服务恢复健康状态。 +::: ## 属性 | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | | ----------------------- | -------------- | ------ | ---------- | --------------- | -------------------------------- | -| break_response_code | integer | 必须 | 无 | [200, ..., 599] | 不健康返回错误码 | -| break_response_body | string | 可选 | 无 | | 不健康返回报文 | -| break_response_headers | array[object] | 可选 | 无 | [{"key":"header_name","value":"can contain Nginx $var"}] | 不健康返回报文头,这里可以设置多个。该字段仅在 `break_response_body` 被配置时生效。这个值能够以 `$var` 的格式包含 APISIX 变量,比如 `{"key":"X-Client-Addr","value":"$remote_addr:$remote_port"}`。 | -| max_breaker_sec | integer | 可选 | 300 | >=3 | 最大熔断持续时间 | -| unhealthy.http_statuses | array[integer] | 可选 | {500} | [500, ..., 599] | 不健康时候的状态码 | -| unhealthy.failures | integer | 可选 | 3 | >=1 | 触发不健康状态的连续错误请求次数 | -| healthy.http_statuses | array[integer] | 可选 | {200} | [200, ..., 499] | 健康时候的状态码 | -| healthy.successes | integer | 可选 | 3 | >=1 | 触发健康状态的连续正常请求次数 | +| break_response_code | integer | 是 | | [200, ..., 599] | 当上游服务处于不健康状态时返回的 HTTP 错误码。 | +| break_response_body | string | 否 | | | 当上游服务处于不健康状态时返回的 HTTP 响应体信息。 | +| break_response_headers | array[object] | 否 | | [{"key":"header_name","value":"can contain Nginx $var"}] | 当上游服务处于不健康状态时返回的 HTTP 响应头信息。该字段仅在配置了 `break_response_body` 属性时生效,并能够以 `$var` 的格式包含 APISIX 变量,比如 `{"key":"X-Client-Addr","value":"$remote_addr:$remote_port"}`。 | +| max_breaker_sec | integer | 否 | 300 | >=3 | 上游服务熔断的最大持续时间,以秒为单位。 | +| unhealthy.http_statuses | array[integer] | 否 | [500] | [500, ..., 599] | 上游服务处于不健康状态时的 HTTP 状态码。 | +| unhealthy.failures | integer | 否 | 3 | >=1 | 上游服务在一定时间内触发不健康状态的异常请求次数。 | +| healthy.http_statuses | array[integer] | 否 | [200] | [200, ..., 499] | 上游服务处于健康状态时的 HTTP 状态码。 | +| healthy.successes | integer | 否 | 3 | >=1 | 上游服务触发健康状态的连续正常请求次数。 | -## 启用方式 +## 启用插件 -这是一个示例,在指定的路由上启用 `api-breaker` 插件。 
-应答 500 或 503 连续 3 次,触发熔断。应答 200 连续 1 次,恢复健康。 +以下示例展示了如何在指定路由上启用 `api-breaker` 插件,该路由配置表示在一定时间内返回 `500` 或 `503` 状态码达到 3 次后触发熔断,返回 `200` 状态码 1 次后恢复健康: ```shell -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "api-breaker": { @@ -85,24 +88,33 @@ curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H 'X-API-KEY: edd1c9f034335f ## 测试插件 -使用上游的配置,如果你的上流服务返回 500,连续 3 次。客户端将会收到 502(break_response_code)应答。 +按上述配置启用插件后,使用 `curl` 命令请求该路由: ```shell -$ curl -i "http://127.0.0.1:9080/hello" -HTTP/1.1 502 Bad Gateway -Content-Type: application/octet-stream -Connection: keep-alive -Server: APISIX/1.5 +curl -i -X POST "http://127.0.0.1:9080/hello" +``` + +如果上游服务在一定时间内返回 `500` 状态码达到 3 次,客户端将会收到 `502 Bad Gateway` 的应答: -... ... +```shell +HTTP/1.1 502 Bad Gateway +... + +502 Bad Gateway + +

502 Bad Gateway

+
openresty
+ + ``` ## 禁用插件 -当想禁用 `api-breaker` 插件时,非常简单,只需要在插件配置中删除相应的 json 配置,无需重启服务,即可立即生效: +当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream": { @@ -113,5 +125,3 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 } }' ``` - -`api-breaker` 插件现在已被禁用,它也适用于其他插件。 diff --git a/docs/zh/latest/plugins/authz-casbin.md b/docs/zh/latest/plugins/authz-casbin.md index a224212ccebd..1045873321e4 100644 --- a/docs/zh/latest/plugins/authz-casbin.md +++ b/docs/zh/latest/plugins/authz-casbin.md @@ -65,7 +65,7 @@ description: 本文介绍了关于 Apache APISIX `authz-casbin` 插件的基本 以下示例展示了通过 model/policy 配置文件来设置 Casbin 身份验证: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -90,7 +90,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 以下示例展示了通过你的 model/policy 文本来设置 Casbin 身份验证: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -134,7 +134,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 所有通过这种方式创建的 Route 都会带有一个带插件元数据配置的 Casbin enforcer。你也可以使用这种方式更新 model/policy,该插件将会自动同步最新的配置信息。 ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/authz-casbin \ +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/authz-casbin \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -i -X PUT -d ' { "model": "[request_definition] @@ -161,7 +161,7 @@ g, alice, admin" 更新插件元数据后,可以将插件添加到指定 Route 中: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -245,7 +245,7 @@ curl -i http://127.0.0.1:9080/res -H 'user: alice' -X GET 当你需要禁用 `authz-casbin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/zh/latest/plugins/authz-casdoor.md b/docs/zh/latest/plugins/authz-casdoor.md index 5876c06bdf3f..a0ed3d9cccb0 100644 --- a/docs/zh/latest/plugins/authz-casdoor.md +++ b/docs/zh/latest/plugins/authz-casdoor.md @@ -53,7 +53,7 @@ description: 本篇文档介绍了 Apache APISIX auth-casdoor 插件的相关信 以下示例展示了如何在指定路由上启用 `auth-casdoor` 插件: ```shell -curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' { "methods": ["GET"], "uri": "/anything/*", @@ -87,7 +87,7 @@ curl "http://127.0.0.1:9080/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f 当需要禁用 `authz-casdoor` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/anything/*", diff --git a/docs/zh/latest/plugins/authz-keycloak.md b/docs/zh/latest/plugins/authz-keycloak.md index 7cc1ae7fcd8c..6bc5c7b9691a 100644 --- a/docs/zh/latest/plugins/authz-keycloak.md +++ b/docs/zh/latest/plugins/authz-keycloak.md @@ -46,8 +46,7 @@ description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基 | discovery | string | 否 | | https://host.domain/auth/realms/foo/.well-known/uma2-configuration | Keycloak 授权服务的 [discovery 
document](https://www.keycloak.org/docs/14.0/authorization_services/#_service_authorization_api) 的 URL。 | | token_endpoint | string | 否 | | https://host.domain/auth/realms/foo/protocol/openid-connect/token | 接受 OAuth2 兼容 token 的接口,需要支持 `urn:ietf:params:oauth:grant-type:uma-ticket` 授权类型。 | | resource_registration_endpoint | string | 否 | | https://host.domain/auth/realms/foo/authz/protection/resource_set | 符合 UMA 的资源注册端点。如果提供,则覆盖发现中的值。 | -| client_id | string | 否 | | | 客户端正在寻求访问的资源服务器的标识符。需要 `client_id` 或 `audience`。 | -| audience | string | 否 | | | 遗留参数。现在被 `client_id` 替换,以保持向后兼容性。需要 `client_id` 或 `audience`。 | +| client_id | string | 是 | | | 客户端正在寻求访问的资源服务器的标识符。 | | client_secret | string | 否 | | | 客户端密码(如果需要)。 | | grant_type | string | 否 | "urn:ietf:params:oauth:grant-type:uma-ticket" | ["urn:ietf:params:oauth:grant-type:uma-ticket"] | | | policy_enforcement_mode | string | 否 | "ENFORCING" | ["ENFORCING", "PERMISSIVE"] | | @@ -73,7 +72,7 @@ description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基 - 使用 `discovery` 属性后,`authz-keycloak` 插件就可以从其 URL 中发现 Keycloak API 的端点。该 URL 指向 Keyloak 针对相应领域授权服务的发现文档。 - 如果发现文档可用,则插件将根据该文档确定令牌端点 URL。如果 URL 存在,则 `token_endpoint` 和 `resource_registration_endpoint` 的值将被其覆盖。 - Client ID and secret - - 该插件需配置 `client_id` 或 `audience`(用于向后兼容)属性来标识自身,如果两者都已经配置,则 `client_id` 优先级更高。 + - 该插件需配置 `client_id` 属性来标识自身。 - 如果 `lazy_load_paths` 属性被设置为 `true`,那么该插件还需要从 Keycloak 中获得一个自身访问令牌。在这种情况下,如果客户端对 Keycloak 的访问是加密的,就需要配置 `client_secret` 属性。 - Policy enforcement mode - `policy_enforcement_mode` 属性指定了在处理发送到服务器的授权请求时,该插件如何执行策略。 @@ -130,7 +129,7 @@ description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基 以下示例为你展示了如何在指定 Route 中启用 `authz-keycloak` 插件,其中 `${realm}` 是 Keycloak 中的 `realm` 名称: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get", @@ -138,7 +137,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 
"authz-keycloak": { "token_endpoint": "http://127.0.0.1:8090/auth/realms/${realm}/protocol/openid-connect/token", "permissions": ["resource name#scope name"], - "audience": "Client ID" + "client_id": "Client ID" } }, "upstream": { @@ -176,7 +175,7 @@ curl http://127.0.0.1:9080/get \ 当你需要禁用 `authz-keycloak` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get", diff --git a/docs/zh/latest/plugins/aws-lambda.md b/docs/zh/latest/plugins/aws-lambda.md index 9662d65c779a..fb0fdb960bc5 100644 --- a/docs/zh/latest/plugins/aws-lambda.md +++ b/docs/zh/latest/plugins/aws-lambda.md @@ -63,7 +63,7 @@ description: 本文介绍了关于 Apache APISIX aws-lambda 插件的基本信 以下示例展示了如何在指定路由上启用 `aws-lambda` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "aws-lambda": { @@ -129,7 +129,7 @@ content-type: application/json 以下示例展示了如何通过配置文件实现授权: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "aws-lambda": { @@ -166,7 +166,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 以下示例展示了如何通过配置文件实现路径转发: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "aws-lambda": { @@ -203,7 +203,7 @@ Content-Type: application/json 当你需要禁用 `aws-lambda` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl 
http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/aws", "plugins": {}, diff --git a/docs/zh/latest/plugins/azure-functions.md b/docs/zh/latest/plugins/azure-functions.md index dac483406772..4664e09fa82e 100644 --- a/docs/zh/latest/plugins/azure-functions.md +++ b/docs/zh/latest/plugins/azure-functions.md @@ -68,7 +68,7 @@ description: 本文介绍了关于 Apache APISIX azure-functions 插件的基本 如果你想添加一个新的 API 密钥,请向 `/apisix/admin/plugin_metadata` 端点发出请求,并附上所需的元数据。示例如下: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/azure-functions -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/azure-functions -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "master_apikey" : "" }' @@ -79,7 +79,7 @@ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/azure-functions -H 'X-AP 以下示例展示了如何在指定路由上启用 `azure-functions` 插件。请确保你的 Azure Functions 已提前部署好,并正常提供服务。 ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "azure-functions": { @@ -152,7 +152,7 @@ Hello, APISIX 以下示例展示了如何通过配置文件实现路径转发: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "azure-functions": { @@ -188,7 +188,7 @@ Hello, APISIX 当你需要禁用 `azure-functions` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 
'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/azure", "plugins": {}, diff --git a/docs/zh/latest/plugins/basic-auth.md b/docs/zh/latest/plugins/basic-auth.md index 69ee41526de7..b324e128cc8e 100644 --- a/docs/zh/latest/plugins/basic-auth.md +++ b/docs/zh/latest/plugins/basic-auth.md @@ -53,7 +53,7 @@ Route 端: 如果需要启用插件,就必须创建一个具有身份验证配置的 Consumer: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers \ +curl http://127.0.0.1:9180/apisix/admin/consumers \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "foo", @@ -77,7 +77,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers \ 创建 Consumer 后,就可以通过配置 Route 或 Service 来验证插件,以下是配置 Route 的命令: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], @@ -99,7 +99,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 通过上述命令启用插件后,可以通过以下方法测试插件。 ```shell -curl -i -ubar:bar http://127.0.0.1:9080/hello +curl -i -ufoo:bar http://127.0.0.1:9080/hello ``` 如果配置成功则返回如下结果: @@ -131,7 +131,7 @@ HTTP/1.1 401 Unauthorized 当你需要禁用 `basic-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/zh/latest/plugins/batch-requests.md b/docs/zh/latest/plugins/batch-requests.md index 067031227ea5..1a02bf17bc52 100644 --- a/docs/zh/latest/plugins/batch-requests.md +++ b/docs/zh/latest/plugins/batch-requests.md @@ -69,7 +69,7 @@ plugins: 默认情况下,可以发送到 `/apisix/batch-requests` 的最大请求体不能大于 1 MiB。 你可以通过 `apisix/admin/plugin_metadata/batch-requests` 更改插件的此配置: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/batch-requests \ +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/batch-requests \ -H 'X-API-KEY: 
edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "max_body_size": 4194304 @@ -88,12 +88,12 @@ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/batch-requests \ ### 请求参数 -| 参数名 | 类型 | 必选项 | 默认值 | 描述 | -| -------- | --------------------------- | ------ | ------ | -------------------------------- | -| query | object | 否 | | 给所有请求都携带的 `query string`。 | -| headers | object | 否 | | 给所有请求都携带的 `header`。 | -| timeout | number | 否 | 30000 | 聚合请求的超时时间,单位为 `ms`。 | -| pipeline | [HttpRequest](#httprequest) | 是 | | HTTP 请求的详细信息。 | +| 参数名 | 类型 | 必选项 | 默认值 | 描述 | +| -------- |------------------------------------| ------ | ------ | -------------------------------- | +| query | object | 否 | | 给所有请求都携带的 `query string`。 | +| headers | object | 否 | | 给所有请求都携带的 `header`。 | +| timeout | number | 否 | 30000 | 聚合请求的超时时间,单位为 `ms`。 | +| pipeline | array[[HttpRequest](#httprequest)] | 是 | | HTTP 请求的详细信息。 | #### HttpRequest @@ -127,7 +127,7 @@ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/batch-requests \ 只需要在创建路由时设置所需的 URI 并更改 `public-api` 插件的配置: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/br \ +curl http://127.0.0.1:9180/apisix/admin/routes/br \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/batch-requests", @@ -144,7 +144,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/br \ 首先,你需要为 `batch-requests` 插件的 API 创建一个路由,它将使用 [public-api](../../../en/latest/plugins/public-api.md) 插件。 ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/apisix/batch-requests", diff --git a/docs/zh/latest/plugins/clickhouse-logger.md b/docs/zh/latest/plugins/clickhouse-logger.md index 4ae170522618..9b0ba11e7373 100644 --- a/docs/zh/latest/plugins/clickhouse-logger.md +++ b/docs/zh/latest/plugins/clickhouse-logger.md @@ -1,5 +1,11 @@ --- title: clickhouse-logger +keywords: + - APISIX + - API 网关 + - Plugin + - ClickHouse 
+description: 本文介绍了 API 网关 Apache APISIX 如何使用 clickhouse-logger 插件将日志数据发送到 ClickHouse 数据库中。 --- + +## 描述 + +`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储。 + +启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elaticsearch 中。更多信息,请参考 [Batch-Processor](./batch-processor.md)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ | +| endpoint_addr | string | 是 | | Elasticsearch API。 | +| field | array | 是 | | Elasticsearch `field`配置信息。 | +| field.index | string | 是 | | Elasticsearch `[_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field)`。 | +| field.type | string | 否 | Elasticsearch 默认值 | Elasticsearch `[_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field)` | +| auth | array | 否 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 配置信息 | +| auth.username | string | 是 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 用户名。 | +| auth.password | string | 是 | | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 密码。 | +| ssl_verify | boolean | 否 | true | 当设置为 `true` 时则启用 SSL 验证。更多信息请参考 [lua-nginx-module](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake)。 | +| timeout | integer | 否 | 10 | 发送给 Elasticsearch 请求超时时间。 | + +本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +## 启用插件 + 
+你可以通过如下命令在指定路由上启用 `elasticsearch-logger` 插件: + +### 完整配置示例 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services", + "type":"collector" + }, + "auth":{ + "username":"elastic", + "password":"123456" + }, + "ssl_verify":false, + "timeout": 60, + "retry_delay":1, + "buffer_duration":60, + "max_retry_count":0, + "batch_max_size":1000, + "inactive_timeout":5, + "name":"elasticsearch-logger" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +### 最小化配置示例 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "elasticsearch-logger":{ + "endpoint_addr":"http://127.0.0.1:9200", + "field":{ + "index":"services" + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` + +## 测试插件 + +向配置 `elasticsearch-logger` 插件的路由发送请求 + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +现在,你可以从 Elasticsearch 获取相关日志。 + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... 
+ "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "M1qAxYIBRmRqWkmH4Wya", + "_score": 1, + "_source": { + "apisix_latency": 0, + "route_id": "1", + "server": { + "version": "2.15.0", + "hostname": "apisix" + }, + "request": { + "size": 102, + "uri": "/elasticsearch.do?q=hello", + "querystring": { + "q": "hello" + }, + "headers": { + "user-agent": "curl/7.29.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/elasticsearch.do?q=hello", + "method": "GET" + }, + "service_id": "", + "latency": 0, + "upstream": "127.0.0.1:1980", + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "start_time": 1661170929107, + "response": { + "size": 192, + "headers": { + "date": "Mon, 22 Aug 2022 12:22:09 GMT", + "server": "APISIX/2.15.0", + "content-type": "text/plain; charset=utf-8", + "connection": "close", + "transfer-encoding": "chunked" + }, + "status": 200 + } + } + } + ] + } +} +``` + +## 插件元数据设置 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------- | ------ | ------ | ------------------------------------------------------------ | ------ | ------------------------------------------------------------ | +| log_format | object | 可选 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。请注意,**该设置是全局生效的**,因此在指定 log_format 后,将对所有绑定 elasticsearch-logger 的 Route 或 Service 生效。 | + +### 设置日志格式示例 + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +在日志收集处,将得到类似下面的日志: + +```json +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} 
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +向配置 `elasticsearch-logger` 插件的路由发送请求 + +```shell +curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello +HTTP/1.1 200 OK +... +hello, world +``` + +现在,你可以从 Elasticsearch 获取相关日志。 + +```shell +curl -X GET "http://127.0.0.1:9200/services/_search" | jq . +{ + "took": 0, + ... + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": 1, + "hits": [ + { + "_index": "services", + "_type": "_doc", + "_id": "NVqExYIBRmRqWkmH4WwG", + "_score": 1, + "_source": { + "@timestamp": "2022-08-22T20:26:31+08:00", + "client_ip": "127.0.0.1", + "host": "127.0.0.1", + "route_id": "1" + } + } + ] + } +} +``` + +### 禁用插件元数据 + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +``` + +## 禁用插件 + +当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{}, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/elasticsearch.do" +}' +``` diff --git a/docs/zh/latest/plugins/error-log-logger.md b/docs/zh/latest/plugins/error-log-logger.md index 2c35057a9bab..02c717e6f2c1 100644 --- a/docs/zh/latest/plugins/error-log-logger.md +++ b/docs/zh/latest/plugins/error-log-logger.md @@ -1,5 +1,11 @@ --- title: error-log-logger +keywords: + - APISIX + - API 网关 + - 错误日志 + - Plugin +description: API 网关 Apache APISIX error-log-logger 插件用于将 APISIX 的错误日志推送到 TCP、Apache SkyWalking 或 ClickHouse 服务器。 --- + +## 描述 + +`ext-plugin-post-resp` 插件用于在执行内置 Lua 插件之前和在 Plugin Runner 内运行特定的 External Plugin。 + +`ext-plugin-post-resp` 插件将在请求获取到上游的响应之后执行。 + +启用本插件之后,APISIX 将使用 [lua-resty-http](https://github.com/api7/lua-resty-http) 库向上游发起请求,这会导致: + +- [proxy-control](./proxy-control.md) 插件不可用 +- 
[proxy-mirror](./proxy-mirror.md) 插件不可用 +- [proxy-cache](./proxy-cache.md) 插件不可用 +- [APISIX 与上游间的双向认证](../mtls.md#apisix-与上游间的双向认证) 功能尚不可用 + +如果你想了解更多关于 External Plugin 的信息,请参考 [External Plugin](../external-plugin.md) 。 + +:::note + +External Plugin 执行的结果会影响当前请求的响应。 + +External Plugin 尚不支持获取请求的上下文信息。 + +External Plugin 尚不支持获取上游响应的响应体。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------- | ------ | ------ | ------- | --------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| conf | array | 否 | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | 在 Plugin Runner 内执行的插件列表的配置。 | +| allow_degradation | boolean| 否 | false | [false, true] | 当 Plugin Runner 临时不可用时是否允许请求继续,当值设置为 `true` 时则自动允许请求继续。 | + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `ext-plugin-post-resp` 插件: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-post-resp": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。 + +## 禁用插件 + +当你需要禁用 `ext-plugin-post-resp` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/ext-plugin-pre-req.md b/docs/zh/latest/plugins/ext-plugin-pre-req.md index ce1564e19835..458ecc511c64 100644 --- a/docs/zh/latest/plugins/ext-plugin-pre-req.md +++ 
b/docs/zh/latest/plugins/ext-plugin-pre-req.md @@ -50,7 +50,7 @@ External Plugin 执行的结果会影响当前请求的行为。 以下示例展示了如何在指定路由中启用 `ext-plugin-pre-req` 插件: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", @@ -84,7 +84,7 @@ curl -i http://127.0.0.1:9080/index.html 当你需要禁用 `ext-plugin-pre-req` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", diff --git a/docs/zh/latest/plugins/fault-injection.md b/docs/zh/latest/plugins/fault-injection.md index 78a8beba2522..dd50740b8275 100644 --- a/docs/zh/latest/plugins/fault-injection.md +++ b/docs/zh/latest/plugins/fault-injection.md @@ -78,7 +78,7 @@ description: 本文介绍了关于 Apache APISIX `fault-injection` 插件的基 你可以在指定路由启用 `fault-injection` 插件,并指定 `abort` 属性。如下所示: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -102,7 +102,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 同样,我们也可以指定 `delay` 属性。如下所示: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -125,7 +125,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 还可以同时为指定路由启用 `fault-injection` 插件,并指定 `abort` 属性和 `delay` 属性的 `vars` 规则。如下所示: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -205,7 +205,7 @@ sys 0m0.010s 你可以在 `fault-injection` 插件中使用 `vars` 规则设置特定规则: ```Shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 
\ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { @@ -272,7 +272,7 @@ Fault Injection! 当你需要禁用 `fault-injection` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", diff --git a/docs/zh/latest/plugins/file-logger.md b/docs/zh/latest/plugins/file-logger.md index 41de2b65757b..ce464f66ec25 100644 --- a/docs/zh/latest/plugins/file-logger.md +++ b/docs/zh/latest/plugins/file-logger.md @@ -1,5 +1,11 @@ --- title: file-logger +keywords: + - APISIX + - API 网关 + - Plugin + - file-logger +description: API 网关 Apache APISIX file-logger 插件可用于将日志数据存储到指定位置。 --- + +## 描述 + +`loggly` 插件可用于将日志转发到 [SolarWinds Loggly](https://www.solarwinds.com/loggly) 进行分析和存储。 + +当启用插件时,APISIX 会将请求上下文信息序列化为符合 [Loggly Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm?cshid=loggly_streaming-syslog-without-using-files) 的数据格式,即具有 [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424) 兼容标头的 Syslog。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------------------------|---------------|----------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| customer_token | string | 是 | | 将日志发送到 Loggly 时使用的唯一标识符,以确保将日志发送到正确的组织帐户。 | +| severity | string (enum) | 否 | INFO | Syslog 日志事件的严重性级别。 包括:`DEBUG`、`INFO`、`NOTICE`、`WARNING`、`ERR`、`CRIT`、`ALERT` 和 `EMEGR`。 | +| severity_map | object | 否 | nil | 一种将上游 HTTP 响应代码映射到 Syslog 中的方法。 `key-value`,其中 `key` 是 HTTP 响应代码,`value`是 Syslog 严重级别。例如`{"410": "CRIT"}`。 | +| tags | array | 否 | | 元数据将包含在任何事件日志中,以帮助进行分段和过滤。 | +| include_req_body | boolean | 否 | false | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 
无法将它记录下来。 | +| include_resp_body | boolean | 否 | false | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志或数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +如果要生成用户令牌,请在 Loggly 系统中的 `/loggly.com/tokens` 设置,或者在系统中单击 `Logs > Source setup > Customer tokens`。 + +## 插件元数据设置 + +你还可以通过插件元数据配置插件。详细配置如下: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|------------|---------|-------|----------------------|--------------------------------|---------------------------------------------------------------------| +| host | string | 否 | "logs-01.loggly.com" | | 发送日志的主机的端点。 | +| port | integer | 否 | 514 | | 要连接的 Loggly 端口。 仅用于 `syslog` 协议。 | +| timeout | integer | 否 | 5000 | | 发送数据请求超时时间(以毫秒为单位)。 | +| protocol | string | 否 | "syslog" | [ "syslog", "http", "https" ] | 将日志发送到 Loggly 的协议。 | +| log_format | object | 否 | nil | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +APISIX 支持 [Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm)、[HTTP/S](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/http-bulk-endpoint.htm)(批量端点)协议将日志事件发送到 Loggly。**默认情况下 `protocol` 的值为 `syslog`**。该协议允许你通过一些细粒度的控制(基于上游 HTTP 响应代码的日志严重性映射)发送符合 RFC5424 的系统日志事件。但是 HTTP/S 批量端点非常适合以更快的传输速度发送更大量的日志事件。 + +:::note 注意 + +Syslog 协议允许你发送符合 RFC5424 的 syslog 事件并进行细粒度控制。但是在以快速传输速度发送大量日志时,使用 HTTP/S 批量端点会更好。你可以通过以下方式更新元数据以更新使用的协议: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loggly \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "protocol": "http" +}' +``` + +::: + +## 启用插件 + +以下示例展示了如何在指定路由上启用该插件: + 
+**完整配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2", + "tags":["apisix", "testroute"], + "severity":"info", + "severity_map":{ + "503": "err", + "410": "alert" + }, + "buffer_duration":60, + "max_retry_count":0, + "retry_delay":1, + "inactive_timeout":2, + "batch_max_size":10 + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +**最小化配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2", + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +发出请求后,你就可以在 Loggly 仪表盘上查看相关日志: + +![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png) + +## 禁用插件 + +当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/mocking.md b/docs/zh/latest/plugins/mocking.md index 13ca95342fb4..ec4568837b1a 100644 --- a/docs/zh/latest/plugins/mocking.md +++ b/docs/zh/latest/plugins/mocking.md @@ -126,7 +126,7 @@ JSON Schema 在其字段中支持以下类型: 你可以通过如下命令在指定路由上启用 `mocking` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], @@ -229,7 +229,7 @@ Server: APISIX/2.10.0 当你需要禁用 `mocking` 
插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], diff --git a/docs/zh/latest/plugins/mqtt-proxy.md b/docs/zh/latest/plugins/mqtt-proxy.md index bd97286ef8e4..2eb8eb2208a4 100644 --- a/docs/zh/latest/plugins/mqtt-proxy.md +++ b/docs/zh/latest/plugins/mqtt-proxy.md @@ -1,5 +1,11 @@ --- title: mqtt-proxy +keywords: + - APISIX + - API 网关 + - Plugin + - MQTT Proxy +description: 本文档介绍了 Apache APISIX mqtt-proxy 插件的信息,通过 `mqtt-proxy` 插件可以使用 MQTT 的 `client_id` 进行动态负载平衡。 --- + +## 描述 + +`openfunction` 插件用于将开源的分布式无服务器平台 [CNCF OpenFunction](https://openfunction.dev/) 作为动态上游集成至 APISIX。 + +启用 `openfunction` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 OpenFunction 的 function 发起一个新的请求,然后 `openfunction` 插件会将响应信息返回至客户端。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------------------------- | ------- | ------ | ------- | ------------ | ------------------------------------------------------------ | +| function_uri | string | 是 | | | OpenFunction function uri,例如 `https://localhost:30858/default/function-sample`。 | +| ssl_verify | boolean | 否 | true | | 当设置为 `true` 时执行 SSL 验证。 | +| authorization | object | 否 | | | 访问 OpenFunction 的函数的授权凭证。| +| authorization.service_token | string | 否 | | | OpenFunction service token,其格式为 `xxx:xxx`,支持函数入口的 basic auth 认证方式。 | +| timeout | integer | 否 | 3000 ms | [100,...] ms | OpenFunction action 和 HTTP 调用超时时间,以毫秒为单位。 | +| keepalive | boolean | 否 | true | | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | +| keepalive_timeout | integer | 否 | 60000 ms| [1000,...] ms| 当连接空闲时,保持该连接处于活动状态的时间,以毫秒为单位。 | +| keepalive_pool | integer | 否 | 5 | [1,...] 
| 连接断开之前,可接收的最大请求数。 | + +:::note 注意 + +`timeout` 字段规定了 OpenFunction function 的最大执行时间,以及 APISIX 中 HTTP 客户端的请求超时时间。 + +因为 OpenFunction function 调用可能会耗费很长时间来拉取容器镜像和启动容器,如果 `timeout` 字段的值设置太小,可能会导致大量请求失败。 + +::: + +## 前提条件 + +在使用 `openfunction` 插件之前,你需要通过以下命令运行 OpenFunction。详情参考 [OpenFunction 安装指南](https://openfunction.dev/docs/getting-started/installation/) 。 + +请确保当前环境中已经安装对应版本的 Kubernetes 集群。 + +### 创建并推送函数 + +你可以参考 [OpenFunction 官方示例](https://github.com/OpenFunction/samples) 创建函数。构建函数时,你需要使用以下命令为容器仓库生成一个密钥,才可以将函数容器镜像推送到容器仓库 ( 例如 Docker Hub 或 Quay.io)。 + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## 测试插件 + +使用 `curl` 命令测试: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST -d'test' +``` + +正常返回结果: + +``` +hello, test! 
+``` + +### 配置路径转发 + +`OpenFunction` 插件还支持 URL 路径转发,同时将请求代理到上游的 OpenFunction API 端点。基本请求路径的扩展(如路由 `/hello/*` 中 `*` 的部分)会被添加到插件配置中指定的 `function_uri`。 + +:::info 重要 + +路由上配置的 `uri` 必须以 `*` 结尾,此功能才能正常工作。APISIX 路由是严格匹配的,`*` 表示此 URI 的任何子路径都将匹配到同一路由。 + +::: + +下面的示例配置了此功能: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +现在,对路径 `hello/123` 的任何请求都将调用 OpenFunction 插件设置的对应的函数,并转发添加的路径: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## 禁用插件 + +当你需要禁用 `openfunction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/openid-connect.md b/docs/zh/latest/plugins/openid-connect.md index 16a8f9bfe47d..d8b69fd63e0b 100644 --- a/docs/zh/latest/plugins/openid-connect.md +++ b/docs/zh/latest/plugins/openid-connect.md @@ -2,10 +2,10 @@ title: openid-connect keywords: - APISIX - - Plugin + - API Gateway - OpenID Connect - - openid-connect -description: 本文介绍了关于 Apache APISIX `openid-connect` 插件的基本信息及使用方法。 + - OIDC +description: OpenID Connect(OIDC)是基于 OAuth 2.0 的身份认证协议,APISIX 可以与支持该协议的身份认证服务对接,如 Okta、Keycloak、Ory Hydra、Authing 等,实现对客户端请求的身份认证。 --- -## 如何提取指标数据 +## 提取指标 -我们可以从指定的 url 中提取指标数据 `/apisix/prometheus/metrics`: +你可以从指定的 URL(默认:`/apisix/prometheus/metrics`)中提取指标数据: ``` curl -i http://127.0.0.1:9091/apisix/prometheus/metrics ``` -把该 uri 地址配置到 prometheus 中去,就会自动完成指标数据提取。 - -例子如下: +你可以将该 URI 地址添加到 Prometheus 中来提取指标数据,配置示例如下: -```yaml +```yaml title="./prometheus.yml" 
scrape_configs: - job_name: "apisix" - scrape_interval: 15s # 这个值会跟 Prometheus QL 中 rate 函数的时间范围有关系,rate 函数中的时间范围应该至少两倍于该值。 + scrape_interval: 15s # 该值会跟 Prometheus QL 中 rate 函数的时间范围有关系,rate 函数中的时间范围应该至少两倍于该值。 metrics_path: "/apisix/prometheus/metrics" static_configs: - targets: ["127.0.0.1:9091"] ``` -我们也可以在 prometheus 控制台中去检查状态: +现在你可以在 Prometheus 控制台中检查状态: ![checking status on prometheus dashboard](../../../assets/images/plugin/prometheus01.png) ![prometheus apisix in-depth metric view](../../../assets/images/plugin/prometheus02.png) -## 如何修改暴露指标的 uri - -我们可以在 `conf/config.yaml` 的 `plugin_attr` 修改默认的 uri +## 使用 Grafana 绘制指标 -| 名称 | 类型 | 默认值 | 描述 | -| ---------- | ------ | ---------------------------- | -------------- | -| export_uri | string | "/apisix/prometheus/metrics" | 暴露指标的 uri | +`prometheus` 插件导出的指标可以在 Grafana 进行图形化绘制显示。 -配置示例: - -```yaml -plugin_attr: - prometheus: - export_uri: /apisix/metrics -``` - -## Grafana 面板 - -插件导出的指标可以在 Grafana 进行图形化绘制显示。 - -下载 [Grafana dashboard 元数据](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) 并导入到 Grafana 中。 +如果需要进行设置,请下载 [APISIX's Grafana dashboard 元数据](https://github.com/apache/apisix/blob/master/docs/assets/other/json/apisix-grafana-dashboard.json) 并导入到 Grafana 中。 你可以到 [Grafana 官方](https://grafana.com/grafana/dashboards/11719) 下载 `Grafana` 元数据。 @@ -152,46 +161,52 @@ plugin_attr: ## 可用的 HTTP 指标 -* `Status codes`: upstream 服务返回的 HTTP 状态码,可以统计到每个服务或所有服务的响应状态码的次数总和。具有的维度: +`prometheus` 插件可以导出以下指标: + +- Status codes: 上游服务返回的 HTTP 状态码,可以统计到每个服务或所有服务的响应状态码的次数总和。属性如下所示: - | 名称 | 描述 | - | -------------| --------------------| - | code | upstream 服务返回的 HTTP 状态码。 | - | route | 请求匹配的 route 的 `route_id`,未匹配,则默认为空字符串。 | - | matched_uri | 请求匹配的 route 的 `uri`,未匹配,则默认为空字符串。 | - | matched_host | 请求匹配的 route 的 `host`,未匹配,则默认为空字符串。 | - | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 | - | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 | - | node | 命中的 
upstream 节点 `ip`。| + | 名称 | 描述 | + | -------------| ----------------------------------------------------------------------------- | + | code | 上游服务返回的 HTTP 状态码。 | + | route | 与请求匹配的路由的 `route_id`,如果未匹配,则默认为空字符串。 | + | matched_uri | 与请求匹配的路由的 `uri`,如果未匹配,则默认为空字符串。 | + | matched_host | 与请求匹配的路由的 `host`,如果未匹配,则默认为空字符串。 | + | service | 与请求匹配的路由的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 | + | consumer | 与请求匹配的消费者的 `consumer_name`。如果未匹配,则默认为空字符串。 | + | node | 上游节点 IP 地址。 | -* `Bandwidth`: 流经 APISIX 的总带宽(可分出口带宽和入口带宽),可以统计到每个服务的带宽总和。具有的维度: +- Bandwidth: 经过 APISIX 的总带宽(出口带宽和入口带宽),可以统计到每个服务的带宽总和。属性如下所示: | 名称 | 描述 | | -------------| ------------- | | type | 带宽的类型 (`ingress` 或 `egress`)。 | - | route | 请求匹配的 route 的 `route_id`,未匹配,则默认为空字符串。 | - | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 | - | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 | - | node | 命中的 upstream 节点 `ip`。 | + | route | 与请求匹配的路由的 `route_id`,如果未匹配,则默认为空字符串。 | + | service | 与请求匹配的路由的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 | + | consumer | 与请求匹配的消费者的 `consumer_name`。如果未匹配,则默认为空字符串。 | + | node | 消费者节点 IP 地址。 | -* `etcd reachability`: APISIX 连接 etcd 的可用性,用 0 和 1 来表示,`1` 表示可用,`0` 表示不可用。 -* `Connections`: 各种的 Nginx 连接指标,如 active(正处理的活动连接数),reading(nginx 读取到客户端的 Header 信息数),writing(nginx 返回给客户端的 Header 信息数),已建立的连接数。 -* `Batch process entries`: 批处理未发送数据计数器,当你使用了批处理发送插件,比如:sys logger, http logger, sls logger, tcp logger, udp logger and zipkin,那么你将会在此指标中看到批处理当前尚未发送的数据的数量。 -* `Latency`: 每个服务的请求用时和 APISIX 处理耗时的直方图。具有的维度: +- etcd reachability: APISIX 连接 etcd 的可用性,用 0 和 1 来表示,`1` 表示可用,`0` 表示不可用。 +- Connections: 各种的 NGINX 连接指标,如 `active`(正处理的活动连接数),`reading`(NGINX 读取到客户端的 Header 信息数),writing(NGINX 返回给客户端的 Header 信息数),已建立的连接数。 +- Batch process entries: 批处理未发送数据计数器,当你使用了批处理发送插件,比如:[syslog](./syslog.md), [http-logger](./http-logger.md), [tcp-logger](./tcp-logger.md), [udp-logger](./udp-logger.md), and [zipkin](./zipkin.md),那么你将会在此指标中看到批处理当前尚未发送的数据的数量。 +- Latency: 
每个服务的请求用时和 APISIX 处理耗时的直方图。属性如下所示: - | 名称 | 描述 | - | -------------| ------------- | - | type | 该值可以为 `apisix`、`upstream` 和 `request`,分别表示耗时的来源为 APISIX、上游及其总和。 | - | service | 与请求匹配的 route 的 `service_id`。当路由缺少 service_id 时,则默认为 `$host`。 | - | consumer | 与请求匹配的 consumer 的 `consumer_name`。未匹配,则默认为空字符串。 | - | node | 命中的 upstream 节点 `ip`。 | + | 名称 | 描述 | + | -------------| --------------------------------------------------------------------------------------- | + | type | 该值可以是 `apisix`、`upstream` 和 `request`,分别表示耗时的来源是 APISIX、上游以及两者总和。 | + | service | 与请求匹配的路由 的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 | + | consumer | 与请求匹配的消费者的 `consumer_name`。未匹配,则默认为空字符串。 | + | node | 上游节点的 IP 地址。 | -* `Info`: 当前 APISIX 节点信息。 +- Info: 当前 APISIX 节点信息。 +- Shared dict: APISIX 中所有共享内存的容量以及剩余可用空间。 -这里是 APISIX 的原始的指标数据集: +以下是 APISIX 的原始的指标数据集: + +```shell +curl http://127.0.0.1:9091/apisix/prometheus/metrics +``` ```shell -$ curl http://127.0.0.1:9091/apisix/prometheus/metrics # HELP apisix_bandwidth Total bandwidth in bytes consumed per service in Apisix # TYPE apisix_bandwidth counter apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417 @@ -254,15 +269,33 @@ apisix_http_latency_bucket{type="upstream",route="1",service="",consumer="",node ... 
# HELP apisix_node_info Info of APISIX node # TYPE apisix_node_info gauge -apisix_node_info{hostname="desktop-2022q8f-wsl"} 1 +apisix_node_info{hostname="APISIX"} 1 +# HELP apisix_shared_dict_capacity_bytes The capacity of each nginx shared DICT since APISIX start +# TYPE apisix_shared_dict_capacity_bytes gauge +apisix_shared_dict_capacity_bytes{name="access-tokens"} 1048576 +apisix_shared_dict_capacity_bytes{name="balancer-ewma"} 10485760 +apisix_shared_dict_capacity_bytes{name="balancer-ewma-last-touched-at"} 10485760 +apisix_shared_dict_capacity_bytes{name="balancer-ewma-locks"} 10485760 +apisix_shared_dict_capacity_bytes{name="discovery"} 1048576 +apisix_shared_dict_capacity_bytes{name="etcd-cluster-health-check"} 10485760 +... +# HELP apisix_shared_dict_free_space_bytes The free space of each nginx shared DICT since APISIX start +# TYPE apisix_shared_dict_free_space_bytes gauge +apisix_shared_dict_free_space_bytes{name="access-tokens"} 1032192 +apisix_shared_dict_free_space_bytes{name="balancer-ewma"} 10412032 +apisix_shared_dict_free_space_bytes{name="balancer-ewma-last-touched-at"} 10412032 +apisix_shared_dict_free_space_bytes{name="balancer-ewma-locks"} 10412032 +apisix_shared_dict_free_space_bytes{name="discovery"} 1032192 +apisix_shared_dict_free_space_bytes{name="etcd-cluster-health-check"} 10412032 +... ``` ## 禁用插件 -在插件设置页面中删除相应的 json 配置即可禁用 `prometheus` 插件。APISIX 的插件是热加载的,因此无需重启 APISIX 服务。 +当你需要禁用 `prometheus` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "plugins": {}, @@ -279,13 +312,13 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 :::info IMPORTANT -该功能要求 Apache APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 
上。 +该功能要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 上。 ::: 我们也可以通过 `prometheus` 插件采集 TCP/UDP 指标。 -首先,确保 `prometheus` 插件已经在你的配置文件(`conf/config.yaml`)中启用: +首先,确保 `prometheus` 插件已经在你的配置文件(`./conf/config.yaml`)中启用: ```yaml title="conf/config.yaml" stream_plugins: @@ -293,10 +326,10 @@ stream_plugins: - prometheus ``` -接着你需要在 stream route 中配置该插件: +接着你需要在 stream 路由中配置该插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "prometheus":{} @@ -312,20 +345,20 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 ## 可用的 TCP/UDP 指标 -以下是把 APISIX 作为 L4 代理时可用的指标: +以下是将 APISIX 作为 L4 代理时可用的指标: -* `Stream Connections`: 路由级别的已处理连接数。具有的维度: +* Stream Connections: 路由级别的已处理连接数。具有的维度: - | 名称 | 描述 | - | -------------| --------------------| - | route | 匹配的 stream route ID| -* `Connections`: 各种的 Nginx 连接指标,如 active,reading,writing,已建立的连接数。 -* `Info`: 当前 APISIX 节点信息。 + | 名称 | 描述 | + | ------------- | ---------------------- | + | route | 匹配的 stream 路由 ID。 | +* Connections: 各种的 NGINX 连接指标,如 `active`,`reading`,`writing` 等已建立的连接数。 +* Info: 当前 APISIX 节点信息。 -这里是 APISIX 指标的范例: +以下是 APISIX 指标的示例: ```shell -$ curl http://127.0.0.1:9091/apisix/prometheus/metrics +curl http://127.0.0.1:9091/apisix/prometheus/metrics ``` ``` diff --git a/docs/zh/latest/plugins/proxy-cache.md b/docs/zh/latest/plugins/proxy-cache.md index b22e4eb1ce4c..0fb5c9acb8e5 100644 --- a/docs/zh/latest/plugins/proxy-cache.md +++ b/docs/zh/latest/plugins/proxy-cache.md @@ -1,5 +1,10 @@ --- title: proxy-cache +keywords: + - APISIX + - API 网关 + - Request Validation +description: 本文介绍了 Apache APISIX proxy-cache 插件的相关操作,你可以使用此插件缓存来自上游的响应。 --- + +## 描述 + +`public-api` 插件可用于通过创建路由的方式暴露用户自定义的 API。 + +你可以通过在路由中添加 `public-api` 插件,来保护**自定义插件为了实现特定功能**而暴露的 API。例如,你可以使用 
[`jwt-auth`](./jwt-auth.md) 插件创建一个公共 API 端点 `/apisix/plugin/jwt/sign` 用于 JWT 认证。 + +:::note 注意 + +默认情况下,在自定义插件中添加的公共 API 不对外暴露的,你需要手动配置一个路由并启用 `public-api` 插件。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------|--------|----------|---------|------------------------------------------------------------| +| uri | string | 否 | "" | 公共 API 的 URI。在设置路由时,使用此属性来配置初始的公共 API URI。 | + +## 启用插件 + +`public-api` 插件需要与授权插件一起配合使用,以下示例分别用到了 [`jwt-auth`](./jwt-auth.md) 插件和 [`key-auth`](./key-auth.md) 插件。 + +### 基本用法 + +首先,你需要启用并配置 `jwt-auth` 插件,详细使用方法请参考 [`jwt-auth`](./jwt-auth.md) 插件文档。 + +然后,使用以下命令在指定路由上启用并配置 `public-api` 插件: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/apisix/plugin/jwt/sign", + "plugins": { + "public-api": {} + } +}' +``` + +**测试插件** + +向配置的 URI 发出访问请求,如果返回一个包含 JWT Token 的响应,则代表插件生效: + +```shell +curl 'http://127.0.0.1:9080/apisix/plugin/jwt/sign?key=user-key' +``` + +```shell +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTk0Mjg1MzIsImtleSI6InVzZXIta2V5In0.NhrWrO-da4kXezxTLdgFBX2rJA2dF1qESs8IgmwhNd0 +``` + +### 使用自定义 URI + +首先,你需要启用并配置 `jwt-auth` 插件,详细使用方法请参考 [`jwt-auth`](./jwt-auth.md) 插件文档。 + +然后,你可以使用一个自定义的 URI 来暴露 API: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r2' \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/gen_token", + "plugins": { + "public-api": { + "uri": "/apisix/plugin/jwt/sign" + } + } +}' +``` + +**测试插件** + +向自定义的 URI 发出访问请求,如果返回一个包含 JWT Token 的响应,则代表插件生效: + +```shell +curl 'http://127.0.0.1:9080/gen_token?key=user-key' +``` + +```shell +eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NTk0Mjg1NjIsImtleSI6InVzZXIta2V5In0.UVkXWbyGb8ajBNtxs0iAaFb2jTEWIlqTR125xr1ZMLc +``` + +### 确保 Route 安全 + +你可以配合使用 `key-auth` 插件来添加认证,从而确保路由的安全: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r2' \ + -H 
'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/gen_token", + "plugins": { + "public-api": { + "uri": "/apisix/plugin/jwt/sign" + }, + "key-auth": { + "key": "test-apikey" + } + } +}' +``` + +**测试插件** + +通过上述命令启用插件并添加认证后,只有经过认证的请求才能访问。 + +发出访问请求并指定 `apikey`,如果返回 `200` HTTP 状态码,则说明请求被允许: + +```shell +curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' \ + -H "apikey: test-apikey" +``` + +```shell +HTTP/1.1 200 OK +``` + +发出访问请求,如果返回 `401` HTTP 状态码,则说明请求被阻止,插件生效: + +```shell +curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' +``` + +```shell +HTTP/1.1 401 Unauthorized +``` + +## 禁用插件 + +当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/real-ip.md b/docs/zh/latest/plugins/real-ip.md index f931c21f8ca3..0eed3534c86a 100644 --- a/docs/zh/latest/plugins/real-ip.md +++ b/docs/zh/latest/plugins/real-ip.md @@ -35,7 +35,7 @@ description: 本文介绍了关于 Apache APISIX `real-ip` 插件的基本信息 :::info IMPORTANT -该插件要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-APISIX-Base-环境?) 
上。 +该插件要求 APISIX 运行在 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境) 上。 ::: @@ -58,7 +58,7 @@ description: 本文介绍了关于 Apache APISIX `real-ip` 插件的基本信息 以下示例展示了如何在指定路由中启用 `real-ip` 插件: ```shell -curl -i http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", @@ -102,7 +102,7 @@ remote-port: 9080 当你需要禁用 `real-ip` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", diff --git a/docs/zh/latest/plugins/redirect.md b/docs/zh/latest/plugins/redirect.md index c40726a16887..61e0862e0776 100644 --- a/docs/zh/latest/plugins/redirect.md +++ b/docs/zh/latest/plugins/redirect.md @@ -34,20 +34,20 @@ description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信 | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | |---------------------|---------------|-----|-------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 状态码的 HTTPS。 | +| http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 状态码的 HTTPS,原 URI 的查询字符串也将包含在 Location 头中。 | | uri | string | 否 | | | 要重定向到的 URI,可以包含 NGINX 变量。例如:`/test/index.htm`, `$uri/index.html`,`${uri}/index.html`。如果你引入了一个不存在的变量,它不会报错,而是将其视为一个空变量。 | | regex_uri | array[string] | 否 | | | 将来自客户端的 URL 与正则表达式匹配并重定向。当匹配成功后使用模板替换发送重定向到客户端,如果未匹配成功会将客户端请求的 URI 转发至上游。 和 `regex_uri` 不可以同时存在。例如:["^/iresty/(.)/(.)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 URI 正则表达式,第二个元素代表匹配成功后发送重定向到客户端的 URI 模板。 | | ret_code | integer | 否 | 302 | [200, ...] 
| HTTP 响应码 | -| encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986)的编码格式进行编码。 | +| encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986) 的编码格式进行编码。 | | append_query_string | boolean | 否 | false | [true,false] | 当设置为 `true` 时,将原始请求中的查询字符串添加到 `Location` Header。如果已配置 `uri` 或 `regex_uri` 已经包含查询字符串,则请求中的查询字符串将附加一个`&`。如果你已经处理过查询字符串(例如,使用 NGINX 变量 `$request_uri`),请不要再使用该参数以避免重复。 | :::note -`http_to_https`、`uri` 和 `regex_uri` 只能配置其中一个属性。 - +* `http_to_https`、`uri` 和 `regex_uri` 只能配置其中一个属性。 +* `http_to_https` 和 `append_query_string` 只能配置其中一个属性。 * 当开启 `http_to_https` 时,重定向 URL 中的端口将按如下顺序选取一个值(按优先级从高到低排列) * 从配置文件(`conf/config.yaml`)中读取 `plugin_attr.redirect.https_port`。 - * 如果 `apisix.ssl` 处于开启状态,先读取 `apisix.ssl.listen_port`,如果没有,再读取 `apisix.ssl.listen` 并从中随机选一个 `port`。 + * 如果 `apisix.ssl` 处于开启状态,读取 `apisix.ssl.listen` 并从中随机选一个 `port`。 * 使用 443 作为默认 `https port`。 ::: @@ -57,7 +57,7 @@ description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信 以下示例展示了如何在指定路由中启用 `redirect` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test/index.html", @@ -79,7 +79,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 \ 你也可以在新的 URI 中使用 NGINX 内置的任意变量: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test", @@ -121,7 +121,7 @@ Location: /test/default.html 以下示例展示了如何将 HTTP 重定向到 HTTPS: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", @@ -151,7 +151,7 @@ Location: https://127.0.0.1:9443/hello 当你需要禁用 
`redirect` 插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 \ +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/test/index.html", diff --git a/docs/zh/latest/plugins/referer-restriction.md b/docs/zh/latest/plugins/referer-restriction.md index cdf6d4605dc5..430f8c112775 100644 --- a/docs/zh/latest/plugins/referer-restriction.md +++ b/docs/zh/latest/plugins/referer-restriction.md @@ -1,5 +1,10 @@ --- title: referer-restriction +keywords: + - APISIX + - API 网关 + - Referer restriction +description: 本文介绍了 Apache APISIX referer-restriction 插件的使用方法,通过该插件可以将 referer 请求头中的域名加入黑名单或者白名单来限制其对服务或路由的访问。 --- + +## 描述 + +`workflow` 插件引入 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 来提供复杂的流量控制功能。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------- | +| rules.case | array[array] | 是 | | | 由一个或多个{var, operator, val}元素组成的列表,类似这样:{{var, operator, val}, {var, operator, val}, ...}}。例如:{"arg_name", "==", "json"},表示当前请求参数 name 是 json。这里的 var 与 NGINX 内部自身变量命名保持一致,所以也可以使用 request_uri、host 等;对于 operator 部分,目前已支持的运算符有 ==、~=、~~、>、<、in、has 和 ! 
。关于操作符的具体用法请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 `operator-list` 部分。 | +| rules.actions | array[object] | 是 | | | 当 `case` 成功匹配时要执行的 `actions`。目前,`actions` 中只支持一个元素。`actions` 的唯一元素的第一个子元素可以是 `return` 或 `limit-count`。 | + +### `actions` 属性 + +#### return + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | ----| ------------- | +| actions[1].return | string | 否 | | | 直接返回到客户端。 | +| actions[1].[2].code | integer | 否 | | | 返回给客户端的 HTTP 状态码。 | + +#### limit-count + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | ----| ------------- | +| actions[1].limit-count | string | 否 | | | 执行 `limit-count` 插件的功能。 | +| actions[1].[2] | object | 否 | | | `limit-count` 插件的配置。 | + +:::note + +在 `rules` 中,按照 `rules` 的数组下标顺序依次匹配 `case`,如果 `case` 匹配成功,则直接执行对应的 `actions`。 + +::: + +## 启用插件 + +以下示例展示了如何在路由中启用 `workflow` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "plugins":{ + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/hello/rejected"] + ], + "actions":[ + [ + "return", + {"code": 403} + ] + ] + }, + { + "case":[ + ["uri", "==", "/hello/v2/appid"] + ], + "actions":[ + [ + "limit-count", + { + "count":2, + "time_window":60, + "rejected_code":429 + } + ] + ] + } + ] + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` + +如上,我们启用了 `workflow` 插件,如果请求与 `rules` 中的 `case` 匹配,则会执行对应的 `actions`。 + +**示例 1: 如果请求的 uri 是 `/hello/rejected`,则返回给客户端状态码 `403`** + +```shell +curl http://127.0.0.1:9080/hello/rejected -i +HTTP/1.1 403 Forbidden +...... 
+ +{"error_msg":"rejected by workflow"} +``` + +**示例 2: 如果请求的 uri 是 `/hello/v2/appid`,则执行 `limit-count` 插件,限制请求的数量为 2,时间窗口为 60 秒,如果超过限制数量,则返回给客户端状态码 `429`** + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 200 OK +``` + +```shell +curl http://127.0.0.1:9080/hello/v2/appid -i +HTTP/1.1 429 Too Many Requests +``` + +**示例 3: 如果请求不能被任何 `case` 匹配,则 `workflow` 不会执行任何操作** + +```shell +curl http://127.0.0.1:9080/hello/fake -i +HTTP/1.1 200 OK +``` + +## 禁用插件 + +当你需要禁用 `workflow` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri":"/hello/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/zipkin.md b/docs/zh/latest/plugins/zipkin.md index 88b2752f0596..e8f5814b9ad0 100644 --- a/docs/zh/latest/plugins/zipkin.md +++ b/docs/zh/latest/plugins/zipkin.md @@ -111,7 +111,7 @@ func main(){ 以下示例展示了如何在指定路由中启用 `zipkin` 插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -178,7 +178,7 @@ docker run -d --name jaeger \ 通过以下命令创建路由并启用插件: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", @@ -221,7 +221,7 @@ HTTP/1.1 200 OK 当你需要禁用 `zipkin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl 
http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", diff --git a/docs/zh/latest/router-radixtree.md b/docs/zh/latest/router-radixtree.md index 63886f4e4b82..9291ec22d628 100644 --- a/docs/zh/latest/router-radixtree.md +++ b/docs/zh/latest/router-radixtree.md @@ -83,7 +83,7 @@ title: 路由 RadixTree 创建两条 `priority` 值不同的路由(值越大,优先级越高)。 ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -97,7 +97,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -124,7 +124,7 @@ curl http://127.0.0.1:1980/hello 以下是设置主机匹配规则的示例: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -138,7 +138,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f ``` ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -197,7 +197,7 @@ apisix: 具体参数及使用方式请查看 [radixtree#new](https://github.com/api7/lua-resty-radixtree#new) 文档,下面是一个简单的示例: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' 
+$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "vars": [ @@ -226,7 +226,7 @@ APISIX 支持通过 POST 表单属性过滤路由,其中需要您使用 `Conte 我们可以定义这样的路由: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST"], "uri": "/_post", @@ -274,7 +274,7 @@ query getRepo { 我们可以用以下方法过滤掉这样的路由: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "methods": ["POST", "GET"], "uri": "/graphql", diff --git a/docs/zh/latest/stand-alone.md b/docs/zh/latest/stand-alone.md index 07f7425afad7..7effb17b6da6 100644 --- a/docs/zh/latest/stand-alone.md +++ b/docs/zh/latest/stand-alone.md @@ -282,9 +282,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/docs/zh/latest/stream-proxy.md b/docs/zh/latest/stream-proxy.md index f2b330ce5a9a..4fd5ef28de17 100644 --- a/docs/zh/latest/stream-proxy.md +++ b/docs/zh/latest/stream-proxy.md @@ -58,7 +58,7 @@ apisix: 简例如下: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "remote_addr": "127.0.0.1", "upstream": { @@ -71,7 +71,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 ``` 例子中 APISIX 对客户端 IP 为 `127.0.0.1` 的请求代理转发到上游主机 `127.0.0.1:1995`。 -更多用例,请参照 [test case](../../../t/stream-node/sanity.t)。 +更多用例,请参照 [test 
case](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t)。 ## 更多 route 匹配选项 @@ -84,7 +84,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 例如 ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.1", "server_port": 2000, @@ -127,7 +127,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 3. 现在我们将创建一个带有服务器过滤的 stream 路由: ```shell - curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "server_addr": "127.0.0.10", "server_port": 9101, @@ -184,7 +184,7 @@ mTLS 也是支持的,参考 [保护路由](./mtls.md#保护路由)。 然后,我们需要配置一个 route,匹配连接并代理到上游: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { @@ -198,7 +198,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 当连接为 TLS over TCP 时,我们可以通过 SNI 来匹配路由,比如: ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "sni": "a.test.com", "upstream": { @@ -217,7 +217,7 @@ curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 APISIX 还支持代理到 TLS over TCP 上游。 ```shell -curl http://127.0.0.1:9080/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl 
http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "scheme": "tls", diff --git a/docs/zh/latest/terminology/consumer.md b/docs/zh/latest/terminology/consumer.md index fc9102983b35..02c15f5ae021 100644 --- a/docs/zh/latest/terminology/consumer.md +++ b/docs/zh/latest/terminology/consumer.md @@ -50,7 +50,7 @@ title: Consumer ```shell # 创建 Consumer ,指定认证插件 key-auth ,并开启特定插件 limit-count -$ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -67,7 +67,7 @@ $ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335 }' # 创建 Router,设置路由规则和启用插件配置 -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -100,7 +100,7 @@ HTTP/1.1 503 Service Temporarily Unavailable ```shell # 设置黑名单,禁止 jack 访问该 API -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {}, diff --git a/docs/zh/latest/terminology/plugin.md b/docs/zh/latest/terminology/plugin.md index 8883ef3744af..b832d394c1fc 100644 --- a/docs/zh/latest/terminology/plugin.md +++ b/docs/zh/latest/terminology/plugin.md @@ -23,13 +23,11 @@ title: Plugin `Plugin` 表示将在 `HTTP` 请求/响应生命周期期间执行的插件配置。 -`Plugin` 配置可直接绑定在 `Route` 上,也可以被绑定在 `Service` 或 `Consumer`上。而对于同一个插件的配置,只能有一份是有效的,配置选择优先级总是 `Consumer` > `Route` > `Service`。 +`Plugin` 配置可直接绑定在 `Route` 上,也可以被绑定在 `Service`、`Consumer` 或 `Plugin Config` 上。而对于同一个插件的配置,只能有一份是有效的,配置选择优先级总是 
`Consumer` > `Route` > `Plugin Config` > `Service`。 在 `conf/config.yaml` 中,可以声明本地 APISIX 节点都支持哪些插件。这是个白名单机制,不在该白名单的插件配置,都将会被自动忽略。这个特性可用于临时关闭或打开特定插件,应对突发情况非常有效。 如果你想在现有插件的基础上新增插件,注意需要拷贝 `conf/config-default.yaml` 的插件节点内容到 `conf/config.yaml` 的插件节点中。 -插件的配置可以被直接绑定在指定 Route 中,也可以被绑定在 Service 中,不过 Route 中的插件配置优先级更高。 - 一个插件在一次请求中只会执行一次,即使被同时绑定到多个不同对象中(比如 Route 或 Service)。 插件运行先后顺序是根据插件自身的优先级来决定的,例如: @@ -68,7 +66,34 @@ local _M = { ## 插件通用配置 -一些通用的配置可以应用于插件配置。比如说。 +通过 `_meta` 配置项可以将一些通用的配置应用于插件,具体配置项如下: + +| 名称 | 类型 | 描述 | +|--------------|------|----------------| +| disable | boolean | 是否禁用该插件。 | +| error_response | string/object | 自定义错误响应。 | +| priority | integer | 自定义插件优先级。 | +| filter | array | 根据请求的参数,在运行时控制插件是否执行。此配置由一个或多个 {var, operator, val} 元素组成列表,类似:{{var, operator, val}, {var, operator, val}, ...}}。例如 `{"arg_version", "==", "v2"}`,表示当前请求参数 `version` 是 `v2`。这里的 `var` 与 NGINX 内部自身变量命名是保持一致。操作符的具体用法请看[lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 operator-list 部分。| + +### 禁用插件 + +通过 `disable` 配置,你可以新增一个处于禁用状态的插件,请求不会经过该插件。 + +```json +{ + "proxy-rewrite": { + "_meta": { + "disable": true + } + } +} +``` + +### 自定义错误响应 + +通过 `error_response` 配置,可以将任意插件的错误响应配置成一个固定的值,避免因为插件内置的错误响应信息而带来困扰。 + +如下配置表示将 `jwt-auth` 插件的错误响应自定义为 '{"message": "Missing credential in request"}'。 ```json { @@ -82,13 +107,141 @@ local _M = { } ``` -上面的配置意味着将 jwt-auth 插件的错误响应自定义为 '{"message": "Missing credential in request"}'。 +### 自定义插件优先级 -### 在 `_meta` 下的插件通用配置 +所有插件都有默认优先级,但你仍可以通过 `priority` 配置项来自定义插件优先级,从而改变插件执行顺序。 -| 名称 | 类型 | 描述 | -|--------------|------|----------------| -| error_response | string/object | 自定义错误响应 | +```json + { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + 
 ngx.say(\"serverless-pre-function\"); + end"] + } +} +``` + +serverless-pre-function 的默认优先级是 10000,serverless-post-function 的默认优先级是 -2000。默认情况下会先执行 serverless-pre-function 插件,再执行 serverless-post-function 插件。 + +上面的配置意味着将 serverless-pre-function 插件的优先级设置为 -2000,serverless-post-function 插件的优先级设置为 10000。serverless-post-function 插件会先执行,再执行 serverless-pre-function 插件。 + +注意: + +- 自定义插件优先级只会影响插件实例绑定的主体,不会影响该插件的所有实例。比如上面的插件配置属于路由 A ,路由 B 上的插件 serverless-pre-function 和 serverless-post-function 插件执行顺序不会受到影响,会使用默认优先级。 +- 自定义插件优先级不适用于 consumer 上配置的插件的 rewrite 阶段。路由上配置的插件的 rewrite 阶段将会优先运行,然后才会运行 consumer 上除 auth 插件之外的其他插件的 rewrite 阶段。 + +### 动态控制插件是否执行 + +默认情况下,在路由中指定的插件都会被执行。但是我们可以通过 `filter` 配置项为插件添加一个过滤器,通过过滤器的执行结果控制插件是否执行。 + +如下配置表示,只有当请求查询参数中 `version` 值为 `v2` 时,`proxy-rewrite` 插件才会执行。 + +```json +{ + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } +} +``` + +使用下述配置创建一条完整的路由: + +```json +{ + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +} +``` + +当请求中不带任何参数时,`proxy-rewrite` 插件不会执行,请求将被转发到上游的 `/get`: + +```shell +curl -v -o /dev/null http://127.0.0.1:9080/get -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... +< Server: APISIX/2.15.0 +< +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6eec-46c97e8a5d95141e621e07fe", + "X-Forwarded-Host": "httpbin.org" + }, + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/get" +} +``` + +当请求中携带参数 `version=v2` 时,`proxy-rewrite` 插件执行,请求将被转发到上游的 `/anything`: + +```shell +curl -v -o /dev/null http://127.0.0.1:9080/get?version=v2 -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... 
+< Server: APISIX/2.15.0 +< +{ + "args": { + "version": "v2" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6f02-24a613b57b6587a076ef18b4", + "X-Forwarded-Host": "httpbin.org" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/anything?version=v2" +} +``` ## 热加载 @@ -97,7 +250,7 @@ APISIX 的插件是热加载的,不管你是新增、删除还是修改插件 只需要通过 admin API 发送一个 HTTP 请求即可: ```shell -curl http://127.0.0.1:9080/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT ``` 注意:如果你已经在路由规则里配置了某个插件(比如在 `route` 的 `plugins` 字段里面添加了它),然后禁用了该插件,在执行路由规则的时候会跳过这个插件。 diff --git a/docs/zh/latest/terminology/route.md b/docs/zh/latest/terminology/route.md index 161fc0bb92f0..53ac72f11cd4 100644 --- a/docs/zh/latest/terminology/route.md +++ b/docs/zh/latest/terminology/route.md @@ -35,7 +35,7 @@ Route 中主要包含三部分内容:匹配规则(比如 uri、host、remote 下面创建的 Route 示例,是把 URL 为 "/index.html" 的请求代理到地址为 "127.0.0.1:1980" 的 Upstream 服务: ```shell -$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/index.html", "upstream": { @@ -53,7 +53,7 @@ Transfer-Encoding: chunked Connection: keep-alive Server: APISIX web server -{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925},"action":"create"} +{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} ``` 当我们接收到成功应答,表示该 Route 已成功创建。 diff --git 
a/docs/zh/latest/terminology/service.md b/docs/zh/latest/terminology/service.md index e5c15086d978..6f34fee9155b 100644 --- a/docs/zh/latest/terminology/service.md +++ b/docs/zh/latest/terminology/service.md @@ -32,7 +32,7 @@ title: Service ```shell # create new Service -$ curl http://127.0.0.1:9080/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +$ curl http://127.0.0.1:9180/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "limit-count": { @@ -51,14 +51,14 @@ $ curl http://127.0.0.1:9080/apisix/admin/services/200 -H 'X-API-KEY: edd1c9f034 }' # create new Route and reference the service by id `200` -curl http://127.0.0.1:9080/apisix/admin/routes/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/100 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/index.html", "service_id": "200" }' -curl http://127.0.0.1:9080/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "methods": ["GET"], "uri": "/foo/index.html", @@ -69,7 +69,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/101 -H 'X-API-KEY: edd1c9f034335f 当然我们也可以为 Route 指定不同的插件参数或上游,比如下面这个 Route 设置了不同的限流参数,其他部分(比如上游)则继续使用 Service 中的配置参数。 ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/102 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/bar/index.html", "id": "102", diff --git a/docs/zh/latest/terminology/upstream.md b/docs/zh/latest/terminology/upstream.md index 6c2a47ad9c56..7905bab639e6 100644 --- a/docs/zh/latest/terminology/upstream.md +++ b/docs/zh/latest/terminology/upstream.md @@ -36,7 +36,7 @@ APISIX 的 Upstream 
除了基本的负载均衡算法选择外,还支持对上 创建上游对象用例: ```shell -curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "type": "chash", "key": "remote_addr", @@ -50,7 +50,7 @@ curl http://127.0.0.1:9080/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335 上游对象创建后,均可以被具体 `Route` 或 `Service` 引用,例如: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "upstream_id": 1 @@ -60,7 +60,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 为了方便使用,也可以直接把上游地址直接绑到某个 `Route` 或 `Service` ,例如: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -83,7 +83,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 下面是一个配置了健康检查的示例: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/index.html", "plugins": { @@ -127,7 +127,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 创建一个 consumer 对象: ```shell -curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/consumers -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "username": "jack", "plugins": { @@ -141,7 +141,7 @@ curl http://127.0.0.1:9080/apisix/admin/consumers -H 'X-API-KEY: 
edd1c9f034335f1 新建路由,打开 `key-auth` 插件认证,`upstream` 的 `hash_on` 类型为 `consumer`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "plugins": { "key-auth": {} @@ -169,7 +169,7 @@ curl http://127.0.0.1:9080/server_port -H "apikey: auth-jack" 新建路由和 `Upstream`,`hash_on` 类型为 `cookie`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hash_on_cookie", "upstream": { @@ -195,7 +195,7 @@ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 新建路由和 `Upstream`,`hash_on` 类型为 `header`,`key` 为 `content-type`: ```shell -curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hash_on_header", "upstream": { diff --git a/powered-by.md b/powered-by.md index 48b12a1a05b4..6b77a6e57704 100644 --- a/powered-by.md +++ b/powered-by.md @@ -97,6 +97,7 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. 数地科技 1. 微吼 1. 小鹏汽车 +1. Ideacreep diff --git a/rockspec/apisix-2.13.2-0.rockspec b/rockspec/apisix-2.13.2-0.rockspec new file mode 100644 index 000000000000..e807d0e4d752 --- /dev/null +++ b/rockspec/apisix-2.13.2-0.rockspec @@ -0,0 +1,100 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +package = "apisix" +version = "2.13.2-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.13.2", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.6.0", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 2.24", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.1", + "lua-protobuf = 0.3.4", + "lua-resty-openidc = 1.7.2-1", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220127", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.07", + "lua-resty-logger-socket = 2.0-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.1", + 
"graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.4.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-2.13.3-0.rockspec b/rockspec/apisix-2.13.3-0.rockspec new file mode 100644 index 000000000000..9814a44f13ea --- /dev/null +++ b/rockspec/apisix-2.13.3-0.rockspec @@ -0,0 +1,100 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "2.13.3-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.13.3", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.8.3", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 2.24", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.1", + "lua-protobuf = 0.3.4", + "lua-resty-openidc = 1.7.2-1", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220127", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.07", + "lua-resty-logger-socket = 2.0-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.1", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.4.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + 
OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-2.15.0-0.rockspec b/rockspec/apisix-2.15.0-0.rockspec new file mode 100644 index 000000000000..31ab2e23f00a --- /dev/null +++ b/rockspec/apisix-2.15.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "2.15.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "2.15.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lua-resty-dns-client = 6.0.2", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.8.2", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.3.4", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220527", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.1", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.0rc1-2", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.5.0", + "casbin = 1.41.1", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.1-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + 
LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} + diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec index f5bfdae206f7..a42d5207e6c6 100644 --- a/rockspec/apisix-master-0.rockspec +++ b/rockspec/apisix-master-0.rockspec @@ -34,12 +34,12 @@ dependencies = { "lua-resty-ctxdump = 0.1-0", "lua-resty-dns-client = 6.0.2", "lua-resty-template = 2.0", - "lua-resty-etcd = 1.7.0", + "lua-resty-etcd = 1.8.3", "api7-lua-resty-http = 0.2.0", "lua-resty-balancer = 0.04", "lua-resty-ngxvar = 0.5.2", "lua-resty-jit-uuid = 0.0.7", - "lua-resty-healthcheck-api7 = 2.2.0", + "lua-resty-healthcheck-api7 = 2.2.1", "api7-lua-resty-jwt = 0.2.4", "lua-resty-hmac-ffi = 0.05", "lua-resty-cookie = 0.1.0", @@ -47,7 +47,7 @@ dependencies = { "opentracing-openresty = 0.1", "lua-resty-radixtree = 2.8.2", "lua-protobuf = 0.3.4", - "lua-resty-openidc = 1.7.2-1", + "lua-resty-openidc = 1.7.5", "luafilesystem = 1.7.0-2", "api7-lua-tinyyaml = 0.4.2", "nginx-lua-prometheus = 0.20220527", @@ -60,7 +60,7 @@ dependencies = { "binaryheap = 0.4", "api7-dkjson = 0.1.1", "resty-redis-cluster = 1.02-4", - "lua-resty-expr = 1.3.1", + "lua-resty-expr = 1.3.2", "graphql = 0.0.2", "argparse = 0.7.1-1", "luasocket = 3.0rc1-2", @@ -77,7 +77,8 @@ dependencies = { "net-url = 0.9-1", "xml2lua = 1.5-2", "nanoid = 0.1-1", - "lua-resty-mediador = 0.1.2-1" + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0" } build = { diff --git a/t/APISIX.pm b/t/APISIX.pm index 0143aa9d82ba..070733c28708 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -33,6 +33,13 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); $ENV{TEST_NGINX_FAST_SHUTDOWN} ||= 1; 
+Test::Nginx::Socket::set_http_config_filter(sub { + my $config = shift; + my $snippet = `$apisix_home/t/bin/gen_snippet.lua conf_server`; + $config .= $snippet; + return $config; +}); + sub read_file($) { my $infile = shift; open my $in, "$apisix_home/$infile" @@ -90,6 +97,8 @@ my $ssl_ecc_crt = read_file("t/certs/apisix_ecc.crt"); my $ssl_ecc_key = read_file("t/certs/apisix_ecc.key"); my $test2_crt = read_file("t/certs/test2.crt"); my $test2_key = read_file("t/certs/test2.key"); +my $etcd_pem = read_file("t/certs/etcd.pem"); +my $etcd_key = read_file("t/certs/etcd.key"); $user_yaml_config = <<_EOC_; apisix: node_listen: 1984 @@ -104,9 +113,13 @@ my $etcd_enable_auth = $ENV{"ETCD_ENABLE_AUTH"} || "false"; if ($etcd_enable_auth eq "true") { $user_yaml_config .= <<_EOC_; -etcd: - user: root - password: 5tHkHhYkjr6cQY +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + user: root + password: 5tHkHhYkjr6cQY _EOC_ } @@ -477,6 +490,12 @@ _EOC_ dns_resolver = $dns_addrs_tbl_str, } apisix.http_init(args) + + -- set apisix_lua_home into constans module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "$apisix_home" + $extra_init_by_lua _EOC_ @@ -561,17 +580,11 @@ _EOC_ require("apisix").http_init_worker() $extra_init_worker_by_lua } -_EOC_ - if ($version !~ m/\/1.17.8/) { - $http_config .= <<_EOC_; exit_worker_by_lua_block { require("apisix").http_exit_worker() } -_EOC_ - } - $http_config .= <<_EOC_; log_format main escape=default '\$remote_addr - \$remote_user [\$time_local] \$http_host "\$request" \$status \$body_bytes_sent \$request_time "\$http_referer" "\$http_user_agent" \$upstream_addr \$upstream_status \$upstream_response_time "\$upstream_scheme://\$upstream_host\$upstream_uri"'; # fake server, only for test @@ -604,6 +617,7 @@ _EOC_ more_clear_headers Date; } + # this configuration is needed as error_page is configured in http block 
location \@50x.html { set \$from_error_page 'true'; content_by_lua_block { @@ -850,6 +864,10 @@ $ssl_ecc_key $test2_crt >>> ../conf/cert/test2.key $test2_key +>>> ../conf/cert/etcd.pem +$etcd_pem +>>> ../conf/cert/etcd.key +$etcd_key $user_apisix_yaml _EOC_ diff --git a/t/admin/api.t b/t/admin/api.t index 98007aecdf42..71c924915435 100644 --- a/t/admin/api.t +++ b/t/admin/api.t @@ -24,7 +24,7 @@ add_block_preprocessor(sub { my ($block) = @_; if (!$block->request) { - $block->set_value("request", "GET /t"); + $block->set_value("request", "GET /apisix/admin/routes"); } if (!$block->no_error_log && !$block->error_log) { @@ -37,19 +37,8 @@ run_tests; __DATA__ === TEST 1: Server header for admin API ---- config - location /t { - content_by_lua_block { - local http = require("resty.http") - local httpc = http.new() - uri = ngx.var.scheme .. "://" .. ngx.var.server_addr - .. ":" .. ngx.var.server_port .. "/apisix/admin/routes" - local res, err = httpc:request_uri(uri) - ngx.say(res.headers["Server"]) - } - } ---- response_body eval -qr/APISIX\// +--- response_headers_like +Server: APISIX/(.*) @@ -58,16 +47,38 @@ qr/APISIX\// apisix: node_listen: 1984 enable_server_tokens: false ---- config - location /t { - content_by_lua_block { - local http = require("resty.http") - local httpc = http.new() - uri = ngx.var.scheme .. "://" .. ngx.var.server_addr - .. ":" .. ngx.var.server_port .. "/apisix/admin/routes" - local res, err = httpc:request_uri(uri) - ngx.say(res.headers["Server"]) - } - } ---- response_body -APISIX +--- error_code: 401 +--- response_headers +Server: APISIX + + + +=== TEST 3: Version header for admin API (without apikey) +--- yaml_config +apisix: + admin_api_version: default +--- error_code: 401 +--- response_headers +! 
X-API-VERSION + + + +=== TEST 4: Version header for admin API (v2) +--- yaml_config +apisix: + admin_api_version: v2 # default may change +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v2 + + + +=== TEST 5: Version header for admin API (v3) +--- yaml_config +apisix: + admin_api_version: v3 +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v3 diff --git a/t/admin/consumers.t b/t/admin/consumers.t index 9ba284b3e494..8e6862f1a2f0 100644 --- a/t/admin/consumers.t +++ b/t/admin/consumers.t @@ -38,13 +38,11 @@ __DATA__ "desc": "new consumer" }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer" - } + "value": { + "username": "jack", + "desc": "new consumer" }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -86,18 +84,16 @@ passed } }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "plugins": { - "key-auth": { - "key": "auth-one" - } + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" } } }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -129,18 +125,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "plugins": { - "key-auth": { - "key": "auth-one" - } + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" } } }, - "action": "get" + "key": "/apisix/consumers/jack" }]] ) @@ -164,10 +158,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/jack', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.status = code ngx.say(body) @@ -189,11 +181,8 @@ passed local t = require("lib.test_admin").test local code = t('/apisix/admin/consumers/not_found', ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + nil + ) 
ngx.say("[delete] code: ", code) } } @@ -217,12 +206,9 @@ GET /t "id":"jack" }]], [[{ - "node": { - "value": { - "id": "jack" - } - }, - "action": "set" + "value": { + "id": "jack" + } }]] ) @@ -257,18 +243,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "username": "jack", - "desc": "new consumer", - "labels": { - "build":"16", - "env":"production", - "version":"v2" - } + "value": { + "username": "jack", + "desc": "new consumer", + "labels": { + "build":"16", + "env":"production", + "version":"v2" } }, - "action": "set" + "key": "/apisix/consumers/jack" }]] ) @@ -353,15 +337,13 @@ GET /t "update_time": 1602893670 }]], [[{ - "node": { - "value": { - "username": "pony", - "desc": "new consumer", - "create_time": 1602883670, - "update_time": 1602893670 - } + "value": { + "username": "pony", + "desc": "new consumer", + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/consumers/pony" }]] ) @@ -385,10 +367,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/consumers/pony', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.status = code ngx.say(body) diff --git a/t/admin/consumers2.t b/t/admin/consumers2.t index 3c296b79339f..6e351d02be96 100644 --- a/t/admin/consumers2.t +++ b/t/admin/consumers2.t @@ -59,13 +59,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/consumers/jack","value":{"username":"jack"}}} +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} @@ -87,18 +87,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + 
assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/consumers/jack","value":{"username":"jack"}}} +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} @@ -124,7 +125,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/consumers/jack","node":{}} +{"deleted":"1","key":"/apisix/consumers/jack"} @@ -150,7 +151,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/consumers","nodes":[]}} +{"list":[],"total":0} diff --git a/t/admin/filter.t b/t/admin/filter.t new file mode 100644 index 000000000000..6173a8548f6a --- /dev/null +++ b/t/admin/filter.t @@ -0,0 +1,808 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +worker_connections(1024); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + admin_key: null + admin_api_version: v3 +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: bad page_size(page_size must be between 10 and 500) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/?page=1&page_size=2', + ngx.HTTP_GET + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body +page_size must be between 10 and 500 + + + +=== TEST 2: ignore bad page and would use default value 1 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=-1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: sort by createdIndex +# the smaller the createdIndex, the higher the ranking +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. 
i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + for i = 1, #res.list - 1 do + assert(res.list[i].createdIndex < res.list[i + 1].createdIndex) + end + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: routes pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/routes/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/routes/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: services pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. 
i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/services/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/services/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: only search name or labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "labels": {"]] .. i .. '":"' .. i .. [["} + }]] + ) + end + + ngx.sleep(0.5) + + local matched = {1, 10, 11} + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the name are 1, 10, 11 + assert(#res.list == 3) + + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + code, body, res = t('/apisix/admin/services/?label=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the label are 1, 10, 11 + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: services filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. 
i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: routes filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: filter with pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/services/?name=1&page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + -- we do filtering first now, so it will first filter to 1, 10, 11, and then paginate + -- res will contain 1, 10, 11 instead of just 1, 10. 
+ assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: routes filter with uri +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: match labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello2", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match labels' keys + local code, body, res = 
t('/apisix/admin/routes/?label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + -- don't match labels' values + code, body, res = t('/apisix/admin/routes/?label=production', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: match uris +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/foo", "/bar"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=world', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: match uris & labels +# uris are same in different routes, filter by labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = 
t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "build": "16" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match route 1 + local code, body, res = t('/apisix/admin/routes/?uri=world&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: match uri & labels +# uri is same in different routes, filter by labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=hello&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: filtered data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total 
== 11) + assert(#res.list == 11) + + local code, body, res = t('/apisix/admin/routes/?label=', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 0) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: pagination data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes?page=1&page_size=10', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 10) + + local code, body, res = t('/apisix/admin/routes?page=10&page_size=10', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/admin/global-rules.t b/t/admin/global-rules.t index 422652a46c74..8f454c834fe3 100644 --- a/t/admin/global-rules.t +++ b/t/admin/global-rules.t @@ -45,20 +45,17 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "set" + "key": "/apisix/global_rules/1" }]] ) @@ -90,20 +87,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "get" + "key": "/apisix/global_rules/1" }]] ) @@ -129,27 +123,23 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "dir": true, - "nodes": [ + "total": 1, + "list": [ { "key": "/apisix/global_rules/1", 
"value": { - "plugins": { - "limit-count": { - "time_window": 60, - "policy": "local", - "count": 2, - "key": "remote_addr", - "rejected_code": 503 + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } } } - } } - ], - "key": "/apisix/global_rules" - }, - "action": "get" + ] }]] ) @@ -191,20 +181,17 @@ passed } }}]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/global_rules/1" }]] ) @@ -252,20 +239,17 @@ passed } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/global_rules/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/global_rules/1" }]] ) @@ -295,12 +279,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -319,12 +299,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code) } } @@ -441,13 +417,13 @@ passed end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } 
} --- response_body -{"action":"set","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -479,13 +455,13 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -510,18 +486,19 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} --- request GET /t --- no_error_log @@ -550,7 +527,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/global_rules/1","node":{}} +{"deleted":"1","key":"/apisix/global_rules/1"} --- request GET /t --- no_error_log diff --git a/t/admin/global-rules2.t b/t/admin/global-rules2.t index 6ff033b52f43..345f67f62283 100644 --- 
a/t/admin/global-rules2.t +++ b/t/admin/global-rules2.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/global_rules","nodes":[]}} +{"list":[],"total":0} @@ -88,13 +88,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/"}}}}} +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} @@ -116,11 +116,18 @@ __DATA__ end res = json.decode(res) - ngx.say(json.encode(res)) + assert(res.total == 1) + assert(#res.list == 1) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/global_rules/1") + assert(res.list[1].value ~= nil) + + ngx.say(message) } } --- response_body_like -{"action":"get","count":1,"node":\{"dir":true,"key":"/apisix/global_rules","nodes":.* +passed @@ -130,12 +137,8 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/health-check.t b/t/admin/health-check.t index d26e92428c9b..1b0a9ca57ce1 100644 --- a/t/admin/health-check.t +++ b/t/admin/health-check.t @@ -43,11 +43,8 @@ add_block_preprocessor(sub { "uri": "/index.html" }]]) exp_data = { - node = { - value = req_data, - key = "/apisix/routes/1", - }, - action = "set", + value = req_data, + key = "/apisix/routes/1", } _EOC_ @@ -87,9 +84,9 @@ __DATA__ } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks - local 
code, body = t('/apisix/admin/routes/1', + local code, body, res = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data, exp_data @@ -130,7 +127,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -318,7 +315,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0"] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -352,7 +349,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0", "Accept: */*"] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -386,7 +383,7 @@ passed "req_headers": ["User-Agent: curl/7.29.0", 2233] } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -422,7 +419,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, @@ -460,8 +457,8 @@ passed } } }]]) - exp_data.node.value.upstream.checks.active = req_data.upstream.checks.active - exp_data.node.value.upstream.checks.passive = { + exp_data.value.upstream.checks.active = req_data.upstream.checks.active + exp_data.value.upstream.checks.passive = { type = "http", healthy = { http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, @@ -512,7 +509,7 @@ passed } } }]]) - exp_data.node.value.upstream.checks = req_data.upstream.checks + exp_data.value.upstream.checks = req_data.upstream.checks local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, diff --git 
a/t/admin/plugin-configs.t b/t/admin/plugin-configs.t index 1f0da8a2a463..852631666eaf 100644 --- a/t/admin/plugin-configs.t +++ b/t/admin/plugin-configs.t @@ -57,20 +57,17 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "set" + "key": "/apisix/plugin_configs/1" }]] ) @@ -98,20 +95,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "get" + "key": "/apisix/plugin_configs/1" }]] ) @@ -133,27 +127,23 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "dir": true, - "nodes": [ + "total": 1, + "list": [ { "key": "/apisix/plugin_configs/1", "value": { - "plugins": { - "limit-count": { - "time_window": 60, - "policy": "local", - "count": 2, - "key": "remote_addr", - "rejected_code": 503 + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } } } - } } - ], - "key": "/apisix/plugin_configs" - }, - "action": "get" + ] }]] ) @@ -191,20 +181,17 @@ passed } }}]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 3, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/plugin_configs/1" }]] ) @@ -248,20 +235,17 @@ 
passed } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } - }, - "key": "/apisix/plugin_configs/1" + } }, - "action": "compareAndSwap" + "key": "/apisix/plugin_configs/1" }]] ) @@ -332,24 +316,21 @@ passed "desc": "blah" }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } - }, - "labels": { - "你好": "世界" - }, - "desc": "blah" + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" }, - "key": "/apisix/plugin_configs/1" + "desc": "blah" }, - "action": "set" + "key": "/apisix/plugin_configs/1" }]] ) @@ -377,24 +358,21 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } - }, - "labels": { - "你好": "世界" - }, - "desc": "blah" + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" }, - "key": "/apisix/plugin_configs/1" + "desc": "blah" }, - "action": "get" + "key": "/apisix/plugin_configs/1" }]] ) @@ -502,10 +480,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_configs/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.print(body) } } @@ -521,10 +497,8 @@ passed ngx.sleep(0.3) local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.say(body) } } @@ -540,10 +514,8 @@ passed ngx.sleep(0.3) local 
t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_configs/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + ngx.HTTP_DELETE + ) ngx.say(body) } } diff --git a/t/admin/plugin-metadata.t b/t/admin/plugin-metadata.t index 5dccf2c0cd2d..5874e4e039c5 100644 --- a/t/admin/plugin-metadata.t +++ b/t/admin/plugin-metadata.t @@ -38,15 +38,13 @@ __DATA__ "ikey": 1 }]], [[{ - "node": { - "value": { - "skey": "val", - "ikey": 1 - } + "value": { + "skey": "val", + "ikey": 1 }, - "action": "set" + "key": "/apisix/plugin_metadata/example-plugin" }]] - ) + ) ngx.status = code ngx.say(body) @@ -73,15 +71,12 @@ passed "ikey": 2 }]], [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "set" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -94,15 +89,12 @@ passed "ikey": 2 }]], [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "set" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.say(code) ngx.say(body) @@ -128,15 +120,12 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "skey": "val2", - "ikey": 2 - } - }, - "action": "get" + "value": { + "skey": "val2", + "ikey": 2 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -157,11 +146,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) ngx.status = code ngx.say(body) @@ -181,13 +166,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/plugin_metadata/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/plugin_metadata/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } } @@ -206,15 +185,12 @@ 
GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata', - ngx.HTTP_PUT, - [[{"k": "v"}]], + ngx.HTTP_PUT, + [[{"k": "v"}]], [[{ - "node": { - "value": "sdf" - }, - "action": "set" + "value": "sdf" }]] - ) + ) ngx.status = code ngx.print(body) @@ -236,15 +212,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata/test', - ngx.HTTP_PUT, - [[{"k": "v"}]], + ngx.HTTP_PUT, + [[{"k": "v"}]], [[{ - "node": { - "value": "sdf" - }, - "action": "set" + "value": "sdf" }]] - ) + ) ngx.status = code ngx.print(body) @@ -271,15 +244,12 @@ GET /t "skey": "val" }]], [[{ - "node": { - "value": { - "skey": "val", - "ikey": 1 - } - }, - "action": "set" + "value": { + "skey": "val", + "ikey": 1 + } }]] - ) + ) ngx.status = code ngx.say(body) @@ -302,12 +272,12 @@ qr/\{"error_msg":"invalid configuration: property \\"ikey\\" is required"\}/ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_PUT, + ngx.HTTP_PUT, [[{ "skey": "val", "ikey": 1 }]] - ) + ) if code >= 300 then ngx.status = code @@ -316,13 +286,11 @@ qr/\{"error_msg":"invalid configuration: property \\"ikey\\" is required"\}/ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}}} +{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}} --- request GET /t --- no_error_log @@ -336,9 +304,7 @@ GET /t content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_GET - ) + local code, message, res = 
t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -347,14 +313,17 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(res.count ~= nil) - res.count = nil + + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}}} +{"key":"/apisix/plugin_metadata/example-plugin","value":{"ikey":1,"skey":"val"}} --- request GET /t --- no_error_log @@ -368,9 +337,7 @@ GET /t content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', - ngx.HTTP_DELETE - ) + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code @@ -383,7 +350,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/plugin_metadata/example-plugin","node":{}} +{"deleted":"1","key":"/apisix/plugin_metadata/example-plugin"} --- request GET /t --- no_error_log diff --git a/t/admin/plugin-metadata2.t b/t/admin/plugin-metadata2.t index 190b9ae20faf..8bce8182ec39 100644 --- a/t/admin/plugin-metadata2.t +++ b/t/admin/plugin-metadata2.t @@ -45,9 +45,7 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/plugin_metadata', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/plugin_metadata', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -60,4 +58,4 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/plugin_metadata","nodes":[]}} +{"list":[],"total":0} diff --git a/t/admin/plugins.t b/t/admin/plugins.t index 2bfb5fee30ce..cfa7173f66b2 100644 --- a/t/admin/plugins.t +++ 
b/t/admin/plugins.t @@ -64,9 +64,9 @@ __DATA__ real-ip client-control proxy-control +request-id zipkin ext-plugin-pre-req -request-id fault-injection mocking serverless-pre-function @@ -93,6 +93,7 @@ authz-keycloak proxy-mirror proxy-cache proxy-rewrite +workflow api-breaker limit-conn limit-count @@ -108,6 +109,7 @@ grpc-web public-api prometheus datadog +elasticsearch-logger echo loggly http-logger @@ -122,10 +124,12 @@ syslog udp-logger file-logger clickhouse-logger +tencent-cloud-cls example-plugin aws-lambda azure-functions openwhisk +openfunction serverless-post-function ext-plugin-post-req ext-plugin-post-resp @@ -150,7 +154,7 @@ GET /apisix/admin/plugins ngx.HTTP_GET, nil, [[ - {"type":"object","required":["rate","burst","key"],"properties":{"rate":{"type":"number","exclusiveMinimum":0},"key_type":{"type":"string","enum":["var","var_combination"],"default":"var"},"burst":{"type":"number","minimum":0},"disable":{"type":"boolean"},"nodelay":{"type":"boolean","default":false},"key":{"type":"string"},"rejected_code":{"type":"integer","minimum":200,"maximum":599,"default":503},"rejected_msg":{"type":"string","minLength":1},"allow_degradation":{"type":"boolean","default":false}}} + {"type":"object","required":["rate","burst","key"],"properties":{"rate":{"type":"number","exclusiveMinimum":0},"key_type":{"type":"string","enum":["var","var_combination"],"default":"var"},"burst":{"type":"number","minimum":0},"nodelay":{"type":"boolean","default":false},"key":{"type":"string"},"rejected_code":{"type":"integer","minimum":200,"maximum":599,"default":503},"rejected_msg":{"type":"string","minLength":1},"allow_degradation":{"type":"boolean","default":false}}} ]] ) @@ -172,7 +176,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"type":"object"} +{"properties":{},"type":"object"} ]] ) @@ -191,7 +195,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"type":"object"} +{"properties":{},"type":"object"} ]] ) @@ 
-210,7 +214,7 @@ plugins: ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) @@ -265,7 +269,7 @@ plugins: } } --- response_body eval -qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"properties":\{"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\}\},"type":"object"\},"disable":\{"type":"boolean"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/ +qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"properties":\{"disable":\{"type":"boolean"\},"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\},"filter":\{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"\},"priority":\{"description":"priority of plugins by customized order","type":"integer"\}\},"type":"object"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/ @@ -366,7 +370,7 @@ qr/\{"properties":\{"password":\{"type":"string"\},"username":\{"type":"string"\ } } --- response_body -{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin 
schema","properties":{"_meta":{"properties":{"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"disable":{"type":"boolean"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1} +{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"properties":{"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"filter":{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"},"priority":{"description":"priority of plugins by customized order","type":"integer"}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1} diff --git a/t/admin/proto.t b/t/admin/proto.t index 3a05a26df9a8..e560ffffcd20 100644 --- a/t/admin/proto.t +++ b/t/admin/proto.t @@ -43,7 +43,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', + local code, message = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; @@ -61,15 +61,10 @@ __DATA__ // Sends a greeting rpc SayHi (HelloRequest) returns (HelloResponse){} }" - }]], - [[ - { - "action": "set" - } - ]] - ) + }]] + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code 
ngx.say("[put proto] code: ", code, " message: ", message) return @@ -79,7 +74,7 @@ __DATA__ } } --- response_body -[put proto] code: 200 message: passed +[put proto] code: 201 message: passed @@ -88,13 +83,9 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) if code ~= 200 then ngx.status = code @@ -115,7 +106,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/2', + local code, message = t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; @@ -133,15 +124,10 @@ __DATA__ // Sends a greeting rpc SayHi (HelloRequest) returns (HelloResponse){} }" - }]], - [[ - { - "action": "set" - } - ]] - ) + }]] + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code ngx.say("[put proto] code: ", code, " message: ", message) return @@ -155,7 +141,9 @@ __DATA__ "methods": ["GET"], "plugins": { "grpc-transcode": { - "disable": false, + "_meta": { + "disable": false + }, "method": "SayHi", "proto_id": 2, "service": "proto.Hello" @@ -169,13 +157,10 @@ __DATA__ }, "uri": "/grpc/sayhi", "name": "hi-grpc" - }]], - [[{ - "action": "set" }]] - ) + ) - if code ~= 200 then + if code ~= 201 then ngx.status = code ngx.say("[route refer proto] code: ", code, " message: ", message) return @@ -184,20 +169,16 @@ __DATA__ ngx.sleep(0.1) -- ensure reference is synced from etcd - code, message = t('/apisix/admin/proto/2', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/protos/2', + ngx.HTTP_DELETE + ) ngx.say("[delete proto] code: ", code) } } --- response_body -[put proto] code: 200 message: passed -[route refer proto] code: 200 message: passed +[put proto] code: 201 message: passed +[route refer proto] 
code: 201 message: passed [delete proto] code: 400 @@ -207,7 +188,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/proto/1', + local code, message = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content": "syntax = \"proto3\"; diff --git a/t/admin/protos.t b/t/admin/protos.t new file mode 100644 index 000000000000..320c6179ee7f --- /dev/null +++ b/t/admin/protos.t @@ -0,0 +1,77 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test /apisix/admin/protos/{id} +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 2; + } + // The greeting service definition. + service Hello { + // Sends a greeting + rpc SayHi (HelloRequest) returns (HelloResponse){} + }" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[put proto] code: ", code, " message: ", message) + return + end + + ngx.say("[put proto] code: ", code, " message: ", message) + } + } +--- response_body +[put proto] code: 201 message: passed diff --git a/t/admin/response_body_format.t b/t/admin/response_body_format.t new file mode 100644 index 000000000000..ae7431387172 --- /dev/null +++ b/t/admin/response_body_format.t @@ -0,0 +1,250 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + admin_key: null + admin_api_version: v3 +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: use v3 admin api, no action in response body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: response body format only have total and list (total is 1) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + 
ngx.say(message) + return + end + res = json.decode(res) + assert(res.total == 1) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: response body format only have total and list (total is 2) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/routes/2") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 4: response body format (test services) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" + }]] + ) + if code >= 
300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/services/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, message, res = t('/apisix/admin/services', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/services/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/services/2") + ngx.say(message) + } + } +--- response_body +passed +passed +passed diff --git a/t/admin/routes-array-nodes.t b/t/admin/routes-array-nodes.t index c9b141883a28..c25b016df786 100644 --- a/t/admin/routes-array-nodes.t +++ b/t/admin/routes-array-nodes.t @@ -47,27 +47,24 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -88,30 +85,27 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - 
"value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }, - "action": "get" + "key": "/apisix/routes/1" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/routes.t b/t/admin/routes.t index a16ccdbdfab4..1e7575fe7887 100644 --- a/t/admin/routes.t +++ b/t/admin/routes.t @@ -45,23 +45,20 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -87,23 +84,20 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "get" + "key": "/apisix/routes/1" }]] ) @@ -126,12 +120,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -150,12 +140,8 @@ GET /t content_by_lua_block { 
local t = require("lib.test_admin").test local code = t('/apisix/admin/routes/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code) } } @@ -187,21 +173,18 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] ) @@ -212,8 +195,7 @@ GET /t end ngx.say("[push] code: ", code, " message: ", message) - - local id = string.sub(res.node.key, #"/apisix/routes/" + 1) + local id = string.sub(res.key, #"/apisix/routes/" + 1) local res = assert(etcd.get('/routes/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") @@ -221,12 +203,8 @@ GET /t assert(update_time ~= nil, "update_time is nil") code, message = t('/apisix/admin/routes/' .. 
id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -259,18 +237,15 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "set" + } }]] ) @@ -318,20 +293,17 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } - }, - "action": "set" + } }]] ) @@ -671,7 +643,9 @@ GET /t "time_window": 60, "rejected_code": 503, "key": "remote_addr", - "disable": true + "_meta": { + "disable": true + } } }, "uri": "/index.html" @@ -714,20 +688,17 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "host": "*.foo.com", - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "host": "*.foo.com", + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes2.t b/t/admin/routes2.t index 42cbc805b120..fce777957910 100644 --- a/t/admin/routes2.t +++ b/t/admin/routes2.t @@ -194,18 +194,18 @@ GET /t end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + res.key = nil + res.value.create_time = nil + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil 
ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"create","node":{"value":{"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/not_unwanted_data_post"}}} +{"value":{"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/not_unwanted_data_post"}} --- no_error_log [error] @@ -239,15 +239,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"set","node":{"key":"/apisix/routes/1","value":{"id":1,"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index.html"}}} +{"key":"/apisix/routes/1","value":{"id":1,"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index.html"}} --- no_error_log [error] @@ -280,15 +280,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}}} +{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} --- 
no_error_log [error] @@ -311,20 +311,21 @@ GET /t end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- request GET /t --- response_body -{"action":"get","node":{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}}} +{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} --- no_error_log [error] @@ -353,7 +354,7 @@ GET /t --- request GET /t --- response_body -{"action":"delete","deleted":"1","key":"/apisix/routes/1","node":{}} +{"deleted":"1","key":"/apisix/routes/1"} --- no_error_log [error] @@ -550,25 +551,22 @@ GET /t "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "您好": "世界" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "您好": "世界" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes3.t b/t/admin/routes3.t index 60e4d4134655..331f1b2d4573 100644 --- a/t/admin/routes3.t +++ b/t/admin/routes3.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body 
-{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/routes","nodes":[]}} +{"list":[],"total":0} @@ -82,20 +82,17 @@ __DATA__ "uri": "/index.html" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "uri": "/index.html" + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "uri": "/index.html" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -128,20 +125,17 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.0/24", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.0/24", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "uri": "/index.html" + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "uri": "/index.html" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -235,13 +229,10 @@ passed "uri": "/patch_test" }]], [[{ - "node": { - "value": { - "uri": "/patch_test" - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/patch_test" }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -278,23 +269,20 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/patch_test", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.2:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/patch_test", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.2:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -318,13 +306,10 @@ passed "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"] }]], [[{ - "node": { - "value": { - "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"] - }, - "key": "/apisix/routes/1" + "value": { + "methods": ["GET", 
"DELETE", "PATCH", "POST", "PUT"] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -348,13 +333,10 @@ passed "methods": ["GET", "POST"] }]], [[{ - "node": { - "value": { - "methods": ["GET", "POST"] - }, - "key": "/apisix/routes/1" + "value": { + "methods": ["GET", "POST"] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -376,15 +358,12 @@ passed ngx.HTTP_PATCH, '["POST"]', [[{ - "node": { - "value": { - "methods": [ - "POST" - ] - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "POST" + ] }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -406,13 +385,10 @@ passed ngx.HTTP_PATCH, '"/patch_uri_test"', [[{ - "node": { - "value": { - "uri": "/patch_uri_test" - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/patch_uri_test" }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -444,23 +420,20 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "desc": "new route", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -492,10 +465,8 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "hosts": ["foo.com", "*.bar.com"] - } + "value": { + "hosts": ["foo.com", "*.bar.com"] } }]] ) @@ -559,10 +530,8 @@ passed "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"] - } + "value": { + "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"] } }]] ) @@ -595,10 +564,8 @@ passed "desc": "new route" }]=], [=[{ - "node": { - "value": { - "vars": [["arg_name", "==", "json"], ["arg_age", ">", 18]] - } + "value": { + "vars": [["arg_name", "==", 
"json"], ["arg_age", ">", 18]] } }]=] ) @@ -630,10 +597,8 @@ passed } }]=], [=[{ - "node": { - "value": { - "filter_func": "function(vars) return vars.arg_name == 'json' end" - } + "value": { + "filter_func": "function(vars) return vars.arg_name == 'json' end" } }]=] ) @@ -738,13 +703,10 @@ passed ngx.HTTP_PATCH, 'false', [[{ - "node": { - "value": { - "enable_websocket": false - }, - "key": "/apisix/routes/1" + "value": { + "enable_websocket": false }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -766,13 +728,10 @@ passed ngx.HTTP_PATCH, 'true', [[{ - "node": { - "value": { - "enable_websocket": true - }, - "key": "/apisix/routes/1" + "value": { + "enable_websocket": true }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) diff --git a/t/admin/routes4.t b/t/admin/routes4.t index 158d6ad6bdc8..3c799be8be90 100644 --- a/t/admin/routes4.t +++ b/t/admin/routes4.t @@ -69,12 +69,10 @@ location /t { ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "uri": "/index.html" - }, - "key": "/apisix/routes/1" - } + "value": { + "uri": "/index.html" + }, + "key": "/apisix/routes/1" }]] ) @@ -119,7 +117,7 @@ location /t { }, "uri": "/index.html" }]], - [[{"action": "create"}]] + [[{}]] ) if code >= 300 then @@ -131,7 +129,7 @@ location /t { ngx.say("[push] succ: ", body) ngx.sleep(2.5) - local id = string.sub(res.node.key, #"/apisix/routes/" + 1) + local id = string.sub(res.key, #"/apisix/routes/" + 1) code, body = t('/apisix/admin/routes/' .. 
id, ngx.HTTP_GET) ngx.say("code: ", code) @@ -198,13 +196,10 @@ location /t { "uri": "/index.html" }]], [[{ - "node": { - "value": { - "priority": 0 - }, - "key": "/apisix/routes/1" + "value": { + "priority": 0 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -237,13 +232,10 @@ passed "priority": 1 }]], [[{ - "node": { - "value": { - "priority": 1 - }, - "key": "/apisix/routes/1" + "value": { + "priority": 1 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -389,13 +381,10 @@ passed "uri": "/index.html" }]], [[{ - "node": { - "value": { - "name": "test name" - }, - "key": "/apisix/routes/1" + "value": { + "name": "test name" }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -664,27 +653,24 @@ failed to read request body: request size 1678025 is greater than the maximum si "uri": "/index.html" }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "build": "16", - "env": "production", - "version": "v2" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "build": "16", + "env": "production", + "version": "v2" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -710,27 +696,24 @@ passed } }]], [[{ - "node": { - "value": { - "methods": [ - "GET" - ], - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "env": "production", - "version": "v2", - "build": "17" - } + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "labels": { + "env": "production", + "version": "v2", + "build": "17" + } }, - "action": "compareAndSwap" + "key": "/apisix/routes/1" }]] ) @@ -788,21 +771,18 @@ passed 
"update_time": 1602893670 }]], [[{ - "node": { - "value": { - "uri": "/index.html", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "create_time": 1602883670, - "update_time": 1602893670 + "type": "roundrobin" }, - "key": "/apisix/routes/1" + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/routes/1" }]] ) @@ -821,12 +801,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/services-array-nodes.t b/t/admin/services-array-nodes.t index 7ca2c6cb5f8a..2d0251377c72 100644 --- a/t/admin/services-array-nodes.t +++ b/t/admin/services-array-nodes.t @@ -32,8 +32,8 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": [{ "host": "127.0.0.1", @@ -45,23 +45,20 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - }, - "desc": "new service" + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -85,23 +82,20 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - }, - "desc": "new service" + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 
1 + }], + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/1" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/services-string-id.t b/t/admin/services-string-id.t index 15160c5a6c63..4c786b81d1b8 100644 --- a/t/admin/services-string-id.t +++ b/t/admin/services-string-id.t @@ -43,21 +43,18 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -78,24 +75,21 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -115,13 +109,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -139,13 +127,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = 
t('/apisix/admin/services/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -165,8 +147,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -175,19 +157,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -197,14 +176,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/services/" + 1) - code, message = t('/apisix/admin/services/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/services/" + 1) + code, message = t('/apisix/admin/services/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -225,8 +198,8 @@ GET /t local core = require("apisix.core") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -235,19 +208,17 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) if code ~= 200 then ngx.status = code @@ -274,8 +245,8 @@ GET /t local core = require("apisix.core") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "plugins": { "limit-count": { "count": 2, @@ -286,21 +257,19 @@ GET /t } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) if code ~= 200 then ngx.status = code @@ -326,8 +295,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/*invalid_id$', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "plugins": { "limit-count": { "count": 2, @@ -337,7 +306,7 @@ GET /t } } }]] - ) + ) ngx.exit(code) } @@ -356,12 +325,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "3", "plugins": {} }]] - ) + ) ngx.status = code 
ngx.print(body) @@ -383,21 +352,18 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "plugins": {} }]], [[{ - "node": { - "value": { - "plugins": {} - }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "value": { + "plugins": {} }, - "action": "set" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] - ) + ) ngx.status = code ngx.say(body) @@ -418,12 +384,12 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": -100, "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -450,7 +416,7 @@ GET /t "id": "*invalid_id$", "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -472,12 +438,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "upstream_id": "invalid$" }]] - ) + ) ngx.status = code ngx.print(body) @@ -499,12 +465,12 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "5eeb3dc90f747328b2930b0b", "upstream_id": "9999999999" }]] - ) + ) ngx.status = code ngx.print(body) @@ -526,11 +492,11 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -557,7 +523,7 @@ GET /t "id": "5eeb3dc90f747328b2930b0b", "plugins": {} }]] - ) + ) ngx.status = code ngx.print(body) @@ -590,19 +556,16 @@ GET /t "desc": "new 20 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": 
"roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 20 service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new 20 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] ) @@ -630,19 +593,16 @@ passed "desc": "new 19 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 19 service" + "type": "roundrobin" }, - "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + "desc": "new 19 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" }]] ) @@ -676,16 +636,14 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -710,8 +668,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -719,7 +677,8 @@ passed "type": "chash" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -741,8 +700,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -751,7 +710,8 @@ GET /t "hash_on": "header" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -773,8 +733,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = 
t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -783,7 +743,8 @@ GET /t "hash_on": "cookie" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.print(body) @@ -805,8 +766,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream": { "nodes": { "127.0.0.1:8080": 1 @@ -815,7 +776,8 @@ GET /t "hash_on": "consumer" }, "desc": "new service" - }]]) + }]] + ) ngx.status = code ngx.say(code .. " " .. body) diff --git a/t/admin/services.t b/t/admin/services.t index 409e47c8ca4f..7bdfb9e5522c 100644 --- a/t/admin/services.t +++ b/t/admin/services.t @@ -44,19 +44,16 @@ __DATA__ "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -88,19 +85,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "get" + "key": "/apisix/services/1" }]] ) @@ -122,13 +116,8 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message,res = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) } } @@ -146,13 +135,7 @@ GET /t location /t { content_by_lua_block { 
local t = require("lib.test_admin").test - local code = t('/apisix/admin/services/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -183,17 +166,14 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "create" + } }]] ) @@ -205,20 +185,14 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/services/" + 1) + local id = string.sub(res.key, #"/apisix/services/" + 1) local res = assert(etcd.get('/services/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - code, message = t('/apisix/admin/services/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/services/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -249,17 +223,14 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" } - }, - "action": "set" + } }]] ) @@ -300,19 +271,16 @@ GET /t } }]], [[{ - "node": { - "value": { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - } + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" } } - }, - "action": "set" + } }]] ) @@ -370,8 +338,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 3, "plugins": {} }]] @@ -397,19 +365,16 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "1", "plugins": {} }]], [[{ - "node": { - "value": { - "plugins": {} - }, - "key": "/apisix/services/1" + "value": { + "plugins": {} }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -432,8 +397,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": -100, "plugins": {} }]] @@ -459,8 +424,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": "invalid_id$", "plugins": {} }]] @@ -486,8 +451,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "upstream_id": "invalid$" }]] @@ -513,8 +478,8 @@ GET /t content_by_lua_block 
{ local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "upstream_id": "9999999999" }]] @@ -540,8 +505,8 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "plugins": {} }]] ) @@ -612,19 +577,16 @@ GET /t "desc": "new 20 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 20 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 20 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -658,19 +620,16 @@ passed "desc": "new 19 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 19 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 19 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -704,16 +663,14 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -749,19 +706,16 @@ passed "desc": "new 22 service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 22 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 22 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -787,19 +741,16 @@ passed 
ngx.HTTP_PATCH, '"new 23 service"', [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new 23 service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "desc": "new 23 service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -831,15 +782,13 @@ passed "type": "roundrobin" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.2:8081": 3, - "127.0.0.3:8082": 4 - }, - "type": "roundrobin" - } + "value": { + "upstream": { + "nodes": { + "127.0.0.2:8081": 3, + "127.0.0.3:8082": 4 + }, + "type": "roundrobin" } } }]] @@ -1001,19 +950,16 @@ GET /t "name": "test service name" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "name": "test service name" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "name": "test service name" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1070,10 +1016,9 @@ GET /t ngx.HTTP_PUT, '{}', [[{ - "node": { - "value": {"id":"1"} - }, - "action": "set" + "value": { + "id":"1" + } }]] ) @@ -1115,30 +1060,27 @@ passed } }]], [[{ - "node":{ - "value":{ - "desc":"empty service", - "plugins":{ - "limit-count":{ - "time_window":60, - "count":2, - "rejected_code":503, - "key":"remote_addr", - "policy":"local" - } - }, - "upstream":{ - "type":"roundrobin", - "nodes":{ - "127.0.0.1:80":1 - }, - "hash_on":"vars", - "pass_host":"pass" + "value":{ + "desc":"empty service", + "plugins":{ + "limit-count":{ + "time_window":60, + "count":2, + "rejected_code":503, + "key":"remote_addr", + "policy":"local" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 }, - "id":"1" - } - }, - "action":"compareAndSwap" + "hash_on":"vars", + "pass_host":"pass" + }, + "id":"1" + } }]] ) @@ -1177,24 +1119,21 @@ passed 
"desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "labels": { - "build": "16", - "env": "production", - "version": "v2" - }, - "desc": "new service" + "type": "roundrobin" + }, + "labels": { + "build": "16", + "env": "production", + "version": "v2" }, - "key": "/apisix/services/1" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1224,24 +1163,21 @@ passed } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - }, - "labels": { - "build": "17", - "env": "production", - "version": "v2" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/1" + "labels": { + "build": "17", + "env": "production", + "version": "v2" + }, + "desc": "new service" }, - "action": "compareAndSwap" + "key": "/apisix/services/1" }]] ) @@ -1311,20 +1247,17 @@ GET /t } }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - "update_time": 1602893670 - } - }, - "key": "/apisix/services/1" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 + } }, - "action": "set" + "key": "/apisix/services/1" }]] ) @@ -1346,13 +1279,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -1370,10 +1297,8 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = 
t('/apisix/admin/services/1', - ngx.HTTP_PUT, - require("toolkit.json").encode({name = ("1"):rep(101)}) - ) + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PUT, + require("toolkit.json").encode({name = ("1"):rep(101)})) ngx.status = code ngx.print(body) @@ -1406,19 +1331,16 @@ GET /t "desc": "new service" }]], [[{ - "node": { - "value": { - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new service" + "type": "roundrobin" }, - "key": "/apisix/services/a.b" + "desc": "new service" }, - "action": "set" + "key": "/apisix/services/a.b" }]] ) diff --git a/t/admin/services2.t b/t/admin/services2.t index 9cbe8e725727..a47592badf74 100644 --- a/t/admin/services2.t +++ b/t/admin/services2.t @@ -63,16 +63,18 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"create","node":{"value":{"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"value":{"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -101,13 +103,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} 
+{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -136,13 +138,13 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + res.value.create_time = nil + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ -152,9 +154,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/services/1', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/services/1', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -163,18 +163,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} @@ 
-199,7 +200,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/services/1","node":{}} +{"deleted":"1","key":"/apisix/services/1"} @@ -237,8 +238,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "service_id": 1, "uri": "/index.html" }]] @@ -261,11 +262,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.print("[delete] code: ", code, " message: ", message) } } @@ -280,11 +277,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -299,11 +292,7 @@ passed content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/admin/ssl.t b/t/admin/ssl.t index 0232d21102fe..e8bf91064c7c 100644 --- a/t/admin/ssl.t +++ b/t/admin/ssl.t @@ -34,24 +34,21 @@ __DATA__ local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" 
}]] ) ngx.status = code ngx.say(body) - local res = assert(etcd.get('/ssl/1')) + local res = assert(etcd.get('/ssls/1')) local prev_create_time = res.body.node.value.create_time assert(prev_create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time @@ -73,19 +70,15 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/ssl/1', + local code, body = t('/apisix/admin/ssls/1', ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "sni": "test.com", - "key": null - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "key": null }, - "action": "get" + "key": "/apisix/ssls/1" }]] ) @@ -107,13 +100,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/ssl/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -131,13 +118,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/ssl/99999999999999', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/ssls/99999999999999', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } } @@ -161,16 +142,13 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "foo.com"} - local code, message, res = t.test('/apisix/admin/ssl', + local code, message, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "foo.com" - } - }, - "action": "create" + "value": { + "sni": "foo.com" + } }]] ) @@ -182,14 +160,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/ssl/" + 1) - code, message = t.test('/apisix/admin/ssl/' .. 
id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + code, message = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -214,17 +186,14 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {sni = "foo.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "foo.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "foo.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -253,17 +222,14 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.foo.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.foo.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.foo.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -294,17 +260,14 @@ passed snis = {"*.foo.com", "bar.com"}, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "snis": ["*.foo.com", "bar.com"] - }, - "key": "/apisix/ssl/1" + "value": { + "snis": ["*.foo.com", "bar.com"] }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -336,18 +299,15 @@ passed exptime = 1588262400 + 60 * 60 * 24 * 365, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "bar.com", - "exptime": 1619798400 - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "bar.com", + "exptime": 1619798400 }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -375,7 +335,7 @@ passed local ssl_key = 
t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', ngx.HTTP_PUT, core.json.encode(data) ) @@ -405,7 +365,7 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/a-b-c-ABC_0123', + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', ngx.HTTP_DELETE ) if code > 300 then @@ -434,7 +394,7 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/*invalid', + local code, body = t.test('/apisix/admin/ssls/*invalid', ngx.HTTP_PUT, core.json.encode(data) ) @@ -471,17 +431,14 @@ GET /t keys = {ssl_ecc_key} } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -513,17 +470,14 @@ passed keys = {}, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -552,23 +506,19 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { version = "v2", build = "16", env = "production"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "labels": { - "version": 
"v2", - "build": "16", - "env": "production" - } - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "labels": { + "version": "v2", + "build": "16", + "env": "production" + } }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -596,21 +546,17 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { env = {"production", "release"}}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "labels": { - "env": ["production", "release"] - } - }, - - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "labels": { + "env": ["production", "release"] + } }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -647,21 +593,18 @@ GET /t validity_end = 1603893670 } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com", - "create_time": 1602883670, - "update_time": 1602893670, - "validity_start": 1602873670, - "validity_end": 1603893670 - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com", + "create_time": 1602883670, + "update_time": 1602893670, + "validity_start": 1602873670, + "validity_end": 1603893670 }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -683,13 +626,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/ssl/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -714,18 +651,15 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body, res = t.test('/apisix/admin/ssl', + 
local code, body, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - } - }, - "action": "create" + "value": { + "sni": "test.com" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -733,17 +667,17 @@ GET /t return end - local id = string.sub(res.node.key, #"/apisix/ssl/" + 1) - local res = assert(etcd.get('/ssl/' .. id)) + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + local res = assert(etcd.get('/ssls/' .. id)) local prev_create_time = res.body.node.value.create_time assert(prev_create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - local code, body = t.test('/apisix/admin/ssl/' .. id, + local code, body = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_PATCH, core.json.encode({create_time = 0, update_time = 1}) - ) + ) if code ~= 201 then ngx.status = code @@ -751,16 +685,14 @@ GET /t return end - local res = assert(etcd.get('/ssl/' .. id)) + local res = assert(etcd.get('/ssls/' .. id)) local create_time = res.body.node.value.create_time assert(create_time == 0, "create_time mismatched") local update_time = res.body.node.value.update_time assert(update_time == 1, "update_time mismatched") -- clean up - local code, body = t.test('/apisix/admin/ssl/' .. id, - ngx.HTTP_DELETE - ) + local code, body = t.test('/apisix/admin/ssls/' .. 
id, ngx.HTTP_DELETE) ngx.status = code ngx.say(body) } @@ -785,14 +717,11 @@ passed local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "key": "/apisix/ssl/1" - }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -821,14 +750,11 @@ GET /t local ssl_key = t.read_file("t/certs/apisix.key") local data = {type = "client", cert = ssl_cert, key = ssl_key} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "key": "/apisix/ssl/1" - }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) diff --git a/t/admin/ssl2.t b/t/admin/ssl2.t index 865652ce2e89..c1e1a9e4a626 100644 --- a/t/admin/ssl2.t +++ b/t/admin/ssl2.t @@ -48,7 +48,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "not-unwanted-post.com"} - local code, message, res = t.test('/apisix/admin/ssl', + local code, message, res = t.test('/apisix/admin/ssls', ngx.HTTP_POST, json.encode(data) ) @@ -60,18 +60,23 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"create","node":{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}}} +{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}} @@ -84,7 +89,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -96,15 +101,19 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}} @@ -117,7 +126,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "t.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, json.encode(data) ) @@ -129,15 +138,19 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil - res.node.value.cert = "" - res.node.value.key = "" + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + 
assert(res.value.key ~= nil) + res.value.key = "" ngx.say(json.encode(res)) } } --- response_body -{"action":"compareAndSwap","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}} @@ -147,7 +160,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin") - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_GET ) @@ -158,21 +171,22 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(value.cert ~= nil) - value.cert = "" - assert(value.key == nil) - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key == nil) ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/ssl/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}}} +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}} @@ -185,7 +199,7 @@ __DATA__ local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE ) @@ -200,7 +214,7 @@ __DATA__ } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/ssl/1","node":{}} 
+{"deleted":"1","key":"/apisix/ssls/1"} @@ -217,7 +231,7 @@ BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= -----END CERTIFICATE----- ]], key = ssl_key, sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -250,7 +264,7 @@ MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== -----END RSA PRIVATE KEY-----]], sni = "test.com"} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -288,7 +302,7 @@ U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= }, keys = {ssl_key} } - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -324,7 +338,7 @@ jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== -----END RSA PRIVATE KEY-----]]} } - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -353,7 +367,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, snis = {}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -382,7 +396,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, snis 
= {"test.com"}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -395,7 +409,7 @@ wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== local data = {"update1.com", "update2.com"} - local code, message, res = t.test('/apisix/admin/ssl/1/snis', + local code, message, res = t.test('/apisix/admin/ssls/1/snis', ngx.HTTP_PATCH, json.encode(data) ) @@ -429,7 +443,7 @@ apisix: local ssl_cert = t.read_file("t/certs/apisix.crt") local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, certs = {ssl_cert}, keys = {ssl_key}} - local code, message, res = t.test('/apisix/admin/ssl/1', + local code, message, res = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, json.encode(data) ) @@ -441,8 +455,8 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.key == ssl_key) - ngx.say(res.node.value.keys[1] == ssl_key) + ngx.say(res.value.key == ssl_key) + ngx.say(res.value.keys[1] == ssl_key) } } --- response_body @@ -465,7 +479,7 @@ apisix: local t = require("lib.test_admin") local ssl_key = t.read_file("t/certs/apisix.key") - local code, message, res = t.test('/apisix/admin/ssl/1/keys', + local code, message, res = t.test('/apisix/admin/ssls/1/keys', ngx.HTTP_PATCH, json.encode({ssl_key}) ) @@ -477,7 +491,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.keys[1] == ssl_key) + ngx.say(res.value.keys[1] == ssl_key) } } --- response_body diff --git a/t/admin/ssl3.t b/t/admin/ssl3.t index cb09b5119223..f9c1cd0ca3bd 100644 --- a/t/admin/ssl3.t +++ b/t/admin/ssl3.t @@ -45,7 +45,7 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/ssl', + local code, message, res = t('/apisix/admin/ssls', ngx.HTTP_GET ) @@ -60,4 +60,4 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/ssl","nodes":[]}} 
+{"list":[],"total":0} diff --git a/t/admin/ssls.t b/t/admin/ssls.t new file mode 100644 index 000000000000..675275628443 --- /dev/null +++ b/t/admin/ssls.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test /apisix/admin/ssls/{id} +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local etcd = require("apisix.core.etcd") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = 
assert(etcd.get('/ssls/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t index 01062fbc84f8..8710d88eaa04 100644 --- a/t/admin/stream-routes.t +++ b/t/admin/stream-routes.t @@ -46,21 +46,18 @@ __DATA__ "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "desc": "test-desc", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "desc": "test-desc", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" + "type": "roundrobin" }, - "key": "/apisix/stream_routes/1" + "desc": "new route" }, - "action": "set" + "key": "/apisix/stream_routes/1" }]] ) @@ -93,20 +90,17 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" + "type": "roundrobin" }, - "key": "/apisix/stream_routes/1" + "desc": "new route" }, - "action": "get" + "key": "/apisix/stream_routes/1" }]] ) @@ -128,13 +122,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/stream_routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -166,19 +154,16 @@ GET /t "desc": "new route" }]], [[{ - "node": { - "value": { - "remote_addr": "127.0.0.1", - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "remote_addr": "127.0.0.1", + 
"upstream": { + "nodes": { + "127.0.0.1:8080": 1 }, - "desc": "new route" - } - }, - "action": "create" + "type": "roundrobin" + }, + "desc": "new route" + } }]] ) @@ -190,7 +175,7 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/stream_routes/" + 1) + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) local ret = assert(etcd.get('/stream_routes/' .. id)) local create_time = ret.body.node.value.create_time @@ -200,13 +185,7 @@ GET /t id = ret.body.node.value.id assert(id ~= nil, "id is nil") - code, message = t('/apisix/admin/stream_routes/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/stream_routes/' .. id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -232,12 +211,19 @@ GET /t "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1980 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] } }]] ) @@ -271,12 +257,19 @@ passed "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1980 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] } }]] ) @@ -301,13 +294,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/stream_routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -357,9 +344,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = 
t('/apisix/admin/stream_routes/a-b-c-ABC_0123', - ngx.HTTP_DELETE - ) + local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code end @@ -433,7 +418,7 @@ GET /t res = json.decode(res) -- clean data - local id = string.sub(res.node.key, #"/apisix/stream_routes/" + 1) + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) local code, message = t('/apisix/admin/stream_routes/' .. id, ngx.HTTP_DELETE ) @@ -444,16 +429,19 @@ GET /t return end - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"create","node":{"value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -487,13 +475,15 @@ GET /t end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} 
+{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -518,17 +508,19 @@ GET /t end res = json.decode(res) - assert(res.count ~= nil) - assert(res.node.value.create_time ~= nil) - assert(res.node.value.update_time ~= nil) - res.count = nil - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}}} +{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} --- request GET /t --- no_error_log @@ -557,7 +549,7 @@ GET /t } } --- response_body -{"action":"delete","deleted":"1","key":"/apisix/stream_routes/1","node":{}} +{"deleted":"1","key":"/apisix/stream_routes/1"} --- request GET /t --- no_error_log diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t index 16855526c1c7..e7220c13d851 100644 --- a/t/admin/upstream-array-nodes.t +++ b/t/admin/upstream-array-nodes.t @@ -43,21 +43,18 @@ __DATA__ "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin", - "desc": "new upstream" - }, - "key": "/apisix/upstreams/1" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", 
+ "desc": "new upstream" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -81,21 +78,18 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin", - "desc": "new upstream" - }, - "key": "/apisix/upstreams/1" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" }, - "action": "get" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -115,13 +109,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -139,13 +127,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/upstreams/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -175,19 +157,16 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": [{ - "host": "127.0.0.1", - "port": 8080, - "weight": 1 - }], - "type": "roundrobin" - } - }, - "action": "create" + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -197,14 +176,8 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1) - code, message = t('/apisix/admin/upstreams/' .. 
id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) + code, message = t('/apisix/admin/upstreams/' .. id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -230,7 +203,7 @@ GET /t "nodes": [], "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -263,7 +236,7 @@ passed "upstream_id": "1", "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -310,7 +283,7 @@ no valid upstream node "_service_name": "xyz", "_discovery_type": "nacos" }]] - ) + ) ngx.status = code ngx.say(body) @@ -341,7 +314,7 @@ passed }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -373,7 +346,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -405,7 +378,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -437,7 +410,7 @@ GET /t }], "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -473,7 +446,7 @@ GET /t }, "uri": "/index.html" }]] - ) + ) ngx.status = code ngx.say(body) diff --git a/t/admin/upstream.t b/t/admin/upstream.t index 16bfb5157b7b..12681780cd17 100644 --- a/t/admin/upstream.t +++ b/t/admin/upstream.t @@ -43,19 +43,16 @@ so that we can delete it later) "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/admin_up" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "set" + "key": "/apisix/upstreams/admin_up" }]] - ) + ) ngx.status = code ngx.say(body) @@ -85,19 +82,16 @@ passed ngx.HTTP_GET, nil, [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/admin_up" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "get" + 
"key": "/apisix/upstreams/admin_up" }]] - ) + ) ngx.status = code ngx.say(body) @@ -117,13 +111,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/admin_up', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/admin_up', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -141,13 +129,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code = t('/apisix/admin/upstreams/not_found', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code) } @@ -176,17 +158,14 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - } - }, - "action": "create" + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } }]] - ) + ) if code ~= 200 then ngx.status = code @@ -196,20 +175,14 @@ GET /t ngx.say("[push] code: ", code, " message: ", message) - local id = string.sub(res.node.key, #"/apisix/upstreams/" + 1) + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) local res = assert(etcd.get('/upstreams/' .. id)) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time assert(update_time ~= nil, "update_time is nil") - code, message = t('/apisix/admin/upstreams/' .. id, - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + code, message = t('/apisix/admin/upstreams/' .. 
id, ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -236,7 +209,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.exit(code) } @@ -263,7 +236,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -294,18 +267,15 @@ GET /t "type": "roundrobin" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -334,7 +304,7 @@ passed }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -364,7 +334,7 @@ GET /t }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -396,7 +366,7 @@ GET /t "_service_name": "xyz", "_discovery_type": "nacos" }]] - ) + ) ngx.status = code ngx.say(body) @@ -426,19 +396,16 @@ passed "type": "chash" }]], [[{ - "node": { - "value": { - "key": "remote_addr", - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "chash" + "value": { + "key": "remote_addr", + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "chash" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -467,7 +434,7 @@ passed }, "type": "unknown" }]] - ) + ) ngx.status = code ngx.print(body) @@ -496,7 +463,7 @@ passed }, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -518,15 +485,15 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "id": 1, "nodes": { "127.0.0.1:8080": -100 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -548,14 +515,14 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 
}, "type": "chash" }]] - ) + ) ngx.status = code ngx.print(body) @@ -577,14 +544,14 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -606,15 +573,15 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams', - ngx.HTTP_POST, - [[{ + ngx.HTTP_POST, + [[{ "id": 1, "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.print(body) @@ -651,9 +618,9 @@ GET /t } } local code, body = t.test('/apisix/admin/upstreams', - ngx.HTTP_POST, - core.json.encode(data) - ) + ngx.HTTP_POST, + core.json.encode(data) + ) ngx.status = code ngx.print(body) @@ -686,9 +653,9 @@ qr/{"error_msg":"invalid configuration: property \\\"tls\\\" validation failed: } } local code, body = t.test('/apisix/admin/upstreams', - ngx.HTTP_POST, - core.json.encode(data) - ) + ngx.HTTP_POST, + core.json.encode(data) + ) ngx.status = code ngx.print(body) @@ -717,7 +684,7 @@ GET /t cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/admin/upstream2.t b/t/admin/upstream2.t index 7f22d1149052..618861c74310 100644 --- a/t/admin/upstream2.t +++ b/t/admin/upstream2.t @@ -61,16 +61,19 @@ __DATA__ end res = json.decode(res) - res.node.key = nil - res.node.value.create_time = nil - res.node.value.update_time = nil - assert(res.node.value.id ~= nil) - res.node.value.id = nil + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"create","node":{"value":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"value":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -81,14 +84,14 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -97,13 +100,15 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"set","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -114,14 +119,14 @@ __DATA__ local json = require("toolkit.json") local t = require("lib.test_admin").test local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_PATCH, - [[{ + ngx.HTTP_PATCH, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -130,13 +135,15 @@ __DATA__ end res = json.decode(res) - res.node.value.create_time = nil - res.node.value.update_time = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body 
-{"action":"compareAndSwap","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -146,9 +153,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_GET - ) + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_GET) if code >= 300 then ngx.status = code @@ -157,18 +162,19 @@ __DATA__ end res = json.decode(res) - local value = res.node.value - assert(value.create_time ~= nil) - value.create_time = nil - assert(value.update_time ~= nil) - value.update_time = nil - assert(res.count ~= nil) - res.count = nil + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil ngx.say(json.encode(res)) } } --- response_body -{"action":"get","node":{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} @@ -178,9 +184,7 @@ __DATA__ content_by_lua_block { local json = require("toolkit.json") local t = require("lib.test_admin").test - local code, message, res = t('/apisix/admin/upstreams/unwanted', - ngx.HTTP_DELETE - ) + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code @@ -193,7 +197,7 @@ __DATA__ } } --- 
response_body -{"action":"delete","deleted":"1","key":"/apisix/upstreams/unwanted","node":{}} +{"deleted":"1","key":"/apisix/upstreams/unwanted"} @@ -204,12 +208,12 @@ __DATA__ local core = require("apisix.core") local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": {}, "type": "roundrobin" }]] - ) + ) if code >= 300 then ngx.status = code @@ -238,7 +242,7 @@ passed "upstream_id": "1", "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -269,8 +273,8 @@ no valid upstream node content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -281,7 +285,7 @@ no valid upstream node "read": 0 } }]] - ) + ) ngx.status = code ngx.print(body) } diff --git a/t/admin/upstream3.t b/t/admin/upstream3.t index 070c8b3d2368..e40e24e99b4a 100644 --- a/t/admin/upstream3.t +++ b/t/admin/upstream3.t @@ -60,7 +60,7 @@ __DATA__ } } --- response_body -{"action":"get","count":0,"node":{"dir":true,"key":"/apisix/upstreams","nodes":[]}} +{"list":[],"total":0} @@ -139,17 +139,14 @@ __DATA__ "desc": "new upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -179,17 +176,14 @@ passed "desc": "new 21 upstream" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new 21 upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new 21 upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -216,16 +210,14 @@ passed } }]], [[{ - 
"node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1, - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 4 - }, - "type": "roundrobin", - "desc": "new 21 upstream" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin", + "desc": "new 21 upstream" } }]] ) @@ -253,15 +245,13 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8081": 3, - "127.0.0.1:8082": 0 - }, - "type": "roundrobin", - "desc": "new 21 upstream" - } + "value": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 + }, + "type": "roundrobin", + "desc": "new 21 upstream" } }]] ) @@ -290,17 +280,14 @@ passed "desc": "new upstream 24" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream 24" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream 24" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -322,17 +309,14 @@ passed ngx.HTTP_PATCH, '"new 25 upstream"', [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new 25 upstream" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new 25 upstream" }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -357,15 +341,13 @@ passed "127.0.0.7:8082": 4 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.6:8081": 3, - "127.0.0.7:8082": 4 - }, - "type": "roundrobin", - "desc": "new 25 upstream" - } + "value": { + "nodes": { + "127.0.0.6:8081": 3, + "127.0.0.7:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" } }]] ) @@ -391,15 +373,13 @@ passed "127.0.0.8:8082": 4 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.7:8081": 0, - "127.0.0.8:8082": 4 - }, - "type": "roundrobin", - "desc": "new 25 upstream" - } + "value": { + "nodes": { + 
"127.0.0.7:8081": 0, + "127.0.0.8:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" } }]] ) @@ -419,15 +399,15 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "server_name", "nodes": { "127.0.0.1:8080": 1 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.say(body) @@ -454,7 +434,7 @@ passed "key": "not_support", "desc": "new upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -472,8 +452,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "arg_device_id", "nodes": { "127.0.0.1:8080": 1 @@ -481,7 +461,7 @@ passed "type": "chash", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -498,15 +478,15 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "server_name", "nodes": { "127.0.0.1:8080": 1 }, "type": "chash" }]] - ) + ) ngx.status = code ngx.say(body) @@ -533,7 +513,7 @@ passed "key": "not_support", "desc": "new upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -560,7 +540,7 @@ passed "type": "chash", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -577,8 +557,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "arg_device_id", "nodes": { "127.0.0.1:8080": 1 @@ -587,7 +567,7 @@ passed "hash_on": "vars", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -604,8 +584,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "custom_header", 
"nodes": { "127.0.0.1:8080": 1 @@ -614,7 +594,7 @@ passed "hash_on": "header", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -631,8 +611,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "$#^@", "nodes": { "127.0.0.1:8080": 1 @@ -641,7 +621,7 @@ passed "hash_on": "header", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -659,8 +639,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "custom_cookie", "nodes": { "127.0.0.1:8080": 1 @@ -669,7 +649,7 @@ passed "hash_on": "cookie", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -686,8 +666,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "$#^@abc", "nodes": { "127.0.0.1:8080": 1 @@ -696,7 +676,7 @@ passed "hash_on": "cookie", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) @@ -714,8 +694,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -723,7 +703,7 @@ passed "hash_on": "consumer", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -740,8 +720,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, @@ -750,7 +730,7 @@ passed "key": "invalid-key", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.say(body) @@ -767,8 +747,8 @@ passed content_by_lua_block { local t = require("lib.test_admin").test 
local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "key": "dsadas", "nodes": { "127.0.0.1:8080": 1 @@ -777,7 +757,7 @@ passed "hash_on": "aabbcc", "desc": "new chash upstream" }]] - ) + ) ngx.status = code ngx.print(body) diff --git a/t/admin/upstream4.t b/t/admin/upstream4.t index 1f55fc92ea46..99c840f944f1 100644 --- a/t/admin/upstream4.t +++ b/t/admin/upstream4.t @@ -53,17 +53,14 @@ __DATA__ "name": "test upstream name" }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "name": "test upstream name" + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "name": "test upstream name" }, - "action": "set" + "key": "/apisix/upstreams/1" }]] ) @@ -106,9 +103,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', - ngx.HTTP_DELETE - ) + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code end @@ -201,40 +196,14 @@ passed -=== TEST 7: invalid route: multi nodes with `node` mode to pass host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ - "nodes": { - "httpbin.org:8080": 1, - "test.com:8080": 1 - }, - "type": "roundrobin", - "pass_host": "node" - }]] - ) - - ngx.status = code - ngx.print(body) - } - } ---- skip_nginx: 5: > 1.19.0 ---- error_code: 400 - - - -=== TEST 8: invalid route: empty `upstream_host` when `pass_host` is `rewrite` +=== TEST 7: invalid route: empty `upstream_host` when `pass_host` is `rewrite` --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "httpbin.org:8080": 1, "test.com:8080": 1 
@@ -243,7 +212,7 @@ passed "pass_host": "rewrite", "upstream_host": "" }]] - ) + ) ngx.status = code ngx.print(body) @@ -253,7 +222,7 @@ passed -=== TEST 9: set upstream(with labels) +=== TEST 8: set upstream(with labels) --- config location /t { content_by_lua_block { @@ -272,23 +241,20 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "build":"16", - "env":"production", - "version":"v2" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } }, - "action": "set" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -299,30 +265,27 @@ passed -=== TEST 10: get upstream(with labels) +=== TEST 9: get upstream(with labels) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_GET, - nil, + ngx.HTTP_GET, + nil, [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "version":"v2", - "build":"16", - "env":"production" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"16", + "env":"production" + } }, - "action": "get" + "key": "/apisix/upstreams/1" }]] ) @@ -335,7 +298,7 @@ passed -=== TEST 11: patch upstream(only labels) +=== TEST 10: patch upstream(only labels) --- config location /t { content_by_lua_block { @@ -348,23 +311,20 @@ passed } }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "labels": { - "version":"v2", - "build":"17", - "env":"production" - } + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"17", + "env":"production" + } }, - "action": 
"compareAndSwap" + "key": "/apisix/upstreams/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -375,7 +335,7 @@ passed -=== TEST 12: invalid format of label value: set upstream +=== TEST 11: invalid format of label value: set upstream --- config location /t { content_by_lua_block { @@ -391,7 +351,7 @@ passed "env": ["production", "release"] } }]] - ) + ) ngx.status = code ngx.print(body) @@ -403,7 +363,7 @@ passed -=== TEST 13: patch upstream(whole, create_time) +=== TEST 12: patch upstream(whole, create_time) --- config location /t { content_by_lua_block { @@ -421,18 +381,15 @@ passed "create_time": 1705252779 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream", - "create_time": 1705252779 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -453,7 +410,7 @@ passed -=== TEST 14: patch upstream(whole, update_time) +=== TEST 13: patch upstream(whole, update_time) --- config location /t { content_by_lua_block { @@ -471,18 +428,15 @@ passed "update_time": 1705252779 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "desc": "new upstream", - "create_time": 1705252779 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/1" + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 }, - "action": "compareAndSwap" + "key": "/apisix/upstreams/1" }]] ) @@ -503,7 +457,7 @@ passed -=== TEST 15: create upstream with create_time and update_time +=== TEST 14: create upstream with create_time and update_time --- config location /t { content_by_lua_block { @@ -519,20 +473,17 @@ passed "update_time": 1602893670 }]], [[{ - "node": { - "value": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin", - "create_time": 1602883670, - 
"update_time": 1602893670 + "value": { + "nodes": { + "127.0.0.1:8080": 1 }, - "key": "/apisix/upstreams/up_create_update_time" + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 }, - "action": "set" + "key": "/apisix/upstreams/up_create_update_time" }]] - ) + ) ngx.status = code ngx.say(body) @@ -543,18 +494,12 @@ passed -=== TEST 16: delete test upstream +=== TEST 15: delete test upstream --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/up_create_update_time', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + local code, message = t('/apisix/admin/upstreams/up_create_update_time', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -563,7 +508,7 @@ passed -=== TEST 17: patch upstream with sub_path, the data is number +=== TEST 16: patch upstream with sub_path, the data is number --- config location /t { content_by_lua_block { @@ -589,8 +534,8 @@ passed ngx.sleep(1) local code, message = t('/apisix/admin/upstreams/1/retries', - ngx.HTTP_PATCH, - json.encode(1) + ngx.HTTP_PATCH, + json.encode(1) ) if code >= 300 then ngx.status = code @@ -606,20 +551,20 @@ passed -=== TEST 18: set upstream(id: 1) +=== TEST 17: set upstream(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "nodes": { "127.0.0.1:8080": 1 }, "type": "roundrobin" }]] - ) + ) ngx.status = code ngx.say(body) @@ -630,17 +575,17 @@ passed -=== TEST 19: set service(id: 1) +=== TEST 18: set service(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream_id": 1 }]] - ) + ) if code >= 300 then ngx.status = code @@ -653,18 +598,18 @@ passed -=== TEST 20: set route(id: 1) 
+=== TEST 19: set route(id: 1) --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "upstream_id": 1, "uri": "/index.html" }]] - ) + ) if code >= 300 then ngx.status = code @@ -677,17 +622,13 @@ passed -=== TEST 21: delete upstream(id: 1) +=== TEST 20: delete upstream(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.print("[delete] code: ", code, " message: ", message) } } @@ -696,17 +637,13 @@ passed -=== TEST 22: delete route(id: 1) +=== TEST 21: delete route(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -715,17 +652,13 @@ passed -=== TEST 23: delete service(id: 1) +=== TEST 22: delete service(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/services/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -734,17 +667,13 @@ passed -=== TEST 24: delete upstream(id: 1) +=== TEST 23: delete upstream(id: 1) --- config location /t { content_by_lua_block { ngx.sleep(0.3) local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] - ) + local code, message = 
t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) ngx.say("[delete] code: ", code, " message: ", message) } } diff --git a/t/bin/gen_snippet.lua b/t/bin/gen_snippet.lua new file mode 100755 index 000000000000..085409b6b5ae --- /dev/null +++ b/t/bin/gen_snippet.lua @@ -0,0 +1,51 @@ +#!/usr/bin/env luajit +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +-- this script generates Nginx configuration in the test +-- so we can test some features with test-nginx +local pkg_cpath_org = package.cpath +local pkg_path_org = package.path +local pkg_cpath = "deps/lib64/lua/5.1/?.so;deps/lib/lua/5.1/?.so;" +local pkg_path = "deps/share/lua/5.1/?.lua;" +-- modify the load path to load our dependencies +package.cpath = pkg_cpath .. pkg_cpath_org +package.path = pkg_path .. 
pkg_path_org + + +local file = require("apisix.cli.file") +local schema = require("apisix.cli.schema") +local snippet = require("apisix.cli.snippet") +local yaml_conf, err = file.read_yaml_conf("t/servroot") +if not yaml_conf then + error(err) +end +local ok, err = schema.validate(yaml_conf) +if not ok then + error(err) +end + +local res, err +if arg[1] == "conf_server" then + res, err = snippet.generate_conf_server( + {apisix_home = "t/servroot/"}, + yaml_conf) +end + +if not res then + error(err or "none") +end +print(res) diff --git a/t/certs/localhost_slapd_cert.pem b/t/certs/localhost_slapd_cert.pem new file mode 100644 index 000000000000..6140ea5f630c --- /dev/null +++ b/t/certs/localhost_slapd_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIECDCCAnCgAwIBAgIUc40/PofbLcrqu/2MJMEkYfrxB+4wDQYJKoZIhvcNAQEL +BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMMCHRlc3QuY29tMB4XDTIy +MDgwMjA1NDI1OFoXDTIzMDgwMjA1NDI1OFowLjESMBAGA1UEAxMJbG9jYWxob3N0 +MRgwFgYDVQQKEw9FeGFtcGxlIENvbXBhbnkwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQCxE5zfta69uPsQVDiV0OwWHDGxTBYNzmp5zsVwOF3bOH+hyB4M ++qFxPEuH84/Ib4GJdLM67qZth1azHudKy/QGPFkoeFUW1JhB9QGyjh/URwxTy05b +Ce5w7Ee1rMV/GWu6fxMfIE3o5U0XuW1IKQFaZVdNuQlvG4VjL59BfnEF+YXb1QDB +kIpvf59q+UuZgit8CrO1dDYeJ/xO3N9v2CS2u6si9/XWgIwayw67tmb7cbTu/srB +C99w97IMP5/Vkeu6fkg2jTuvCRARzMQJ11krDmtGeYum9SSCdyTLxK1u7w33DuhQ +3HE/PfHJj9QV1MKIeruVjEvawJsRiWQG0Ai7AgMBAAGjdjB0MAwGA1UdEwEB/wQC +MAAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNVHQ4E +FgQUcGOrPCoztq5Z7mjgGtaCkPkmDWowHwYDVR0jBBgwFoAUmbUr1fJgcJdG6ZLx +bYMojlFHG7MwDQYJKoZIhvcNAQELBQADggGBABNOTIiLHNQJfyV20UxcyzZ9xTuc +DuMzEexWJ6S33yJTyp5jni0vFaF9wnT1MOtp+Zizz0hQq0d+GvsmBzjkDdipFqUB +Dt4517l4Z/H4n4FV0jhqQhhzcPRWI5H2MNU0Ezno1iCaKD29Kq61fo2qrU7SNDre +RjnGueTW6u+YLj1ss+UK2rTCRX/Nqqz+MrvIift5Kj4c/8sAD3Zn2aXlH0dXSTcX +DaqNDPQvcdlqNMRSJSthLXYBn40Ro6mH7uA+e4aIVn4jyYvyb8qY5LhQPesTcJZw 
+IEDmIgFEIh0k1YoGvLD6TkMdKPUG536zH+4iZjKpwGwNQ/dTBgn4+5UOqguiYgXd +MP/eeXSCGLAIjQ4+i1ghv1eAlHuHSQ3Dm75icpAL7VHFdoI7I3wqeE5+IyrUXjX0 +s1bCjIuwGxgoBBTzv25OijmTmMcLYDp04PR5qSwckvsrrxHr+2ujeqS+AGxzZ4Sk +N1JSJL69zUwfCVdE3mR+6OmmDcuVlB3u+grLFQ== +-----END CERTIFICATE----- diff --git a/t/certs/localhost_slapd_key.pem b/t/certs/localhost_slapd_key.pem new file mode 100644 index 000000000000..fa33248c6240 --- /dev/null +++ b/t/certs/localhost_slapd_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsROc37Wuvbj7EFQ4ldDsFhwxsUwWDc5qec7FcDhd2zh/ocge +DPqhcTxLh/OPyG+BiXSzOu6mbYdWsx7nSsv0BjxZKHhVFtSYQfUBso4f1EcMU8tO +WwnucOxHtazFfxlrun8THyBN6OVNF7ltSCkBWmVXTbkJbxuFYy+fQX5xBfmF29UA +wZCKb3+favlLmYIrfAqztXQ2Hif8Ttzfb9gktrurIvf11oCMGssOu7Zm+3G07v7K +wQvfcPeyDD+f1ZHrun5INo07rwkQEczECddZKw5rRnmLpvUkgncky8Stbu8N9w7o +UNxxPz3xyY/UFdTCiHq7lYxL2sCbEYlkBtAIuwIDAQABAoIBAGDANpaEzlUbHRJu +8fvpixUJkp0s1V/1yHeFYptOMPn2hMYAcWrmBg+4wgwmKAl742sXOFaazpRJvjVg +TT+w8EP39T8HgHZY8lgXZjYJMZrqtvGRw946Lu3EK+o33DD10sazZ98551e48cZk +qjEjNnoNpQXydBUhFGB9RKakT1zTb8e+ZQdsrE+ZzgM9/xVFRx4gsfNbed/5TMHZ +QbwaqPzQRiS9ScRwvZ+TE20cGQ66qZqR6+JCatc8BpXA9Q6ZmTj61MSl6MMzCuOS +yIGm5J+siPkLV/ki+MAHk59G9iEsTjS1T1l4aQn0kTtdMx9oVCPODY6Jdi8jIaU/ +TwGWuQECgYEAxJEg/YKjZGQFhidP64OGi1ochFZxuJFwcZ17DgmZPkiU+vpC8KYl +QpR0r0zN9vqP+71nMMoVJfektXRMP4cy0ebSAbx47X5IfdYUhID+/OAlxbl1O9ah +lGWk90zknVvQKahImtYZqepQEYyetQiDB4gX2bLT+8IIt16ebGC/TyUCgYEA5p3g +Tcj69nxyy4BuGxYuNfTORTCzd9zhURN7325HVBMlhen/f1e+yjV1zth9yLDl5Wyl +99jkVCvy6p83s+1EDKdgOTYrxgD31Y934De/m53U6P/yHeic3z9dIgIAn+qcJqU6 +CL28lXEV8jKLNmlR0crWSjtSBDIpA3BWWN834l8CgYAxgcPnVZHFZROnGBue2391 +dXqdMhBuReMmGl21yWEZOLqdA478gTv9KtrAk/2D6NN+udNVjHALIfYP5XyWu3xn +NVVLLqbeWeH0H4kHXl3aXrHkvLL0ITiM4ZTM3EbwAwHInCO9K5NHIkaMRPhr6/rk +WLh5Efsl+1aqqGAKN8u3KQKBgFDjcUh3RSdtkSo12ujfR8gfHLaCFYDmVZWFev5s +hNJFgPTOlZJJ6Z6tT6wEnWHmQkzNZg1f4v5vB94piHUwtJynnIWUrZfewQ8EKmzX +wPpJSuOK2paI/3UCmZ0TDLsKpEidzZRBUMMuDh+MgO3N1Sf7uFwDIIpeOap+HZtA 
+eC6LAoGAFaN/0hr3kBCGGUQ0MKSEw1A4jJntR+Enz5+vJ1F/yW7E3SNp5gHz8sF1 +ppt3OZKtZeIoaCapIEr4hRZzzZr2zNHu3tyizscLAdcqKbt2o7OlPK7Z5mhREN8E +F4obLQI+YsAv2aOY2EFTSPq70N2OL45NLsdq3igpKZEIbpUgnwA= +-----END RSA PRIVATE KEY----- diff --git a/t/chaos/delayetcd/delayetcd.go b/t/chaos/delayetcd/delayetcd.go index a8245e6a4099..4cb1c9ed3bcb 100644 --- a/t/chaos/delayetcd/delayetcd.go +++ b/t/chaos/delayetcd/delayetcd.go @@ -100,6 +100,8 @@ func deleteChaosAndCheck(eSilent *httpexpect.Expect, cliSet *utils.ClientSet, ch var _ = ginkgo.Describe("Test APISIX Delay When Add ETCD Delay", func() { ctx := context.Background() e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) eSilent := utils.GetSilentHttpexpectClient() var cliSet *utils.ClientSet @@ -123,8 +125,8 @@ var _ = ginkgo.Describe("Test APISIX Delay When Add ETCD Delay", func() { utils.SetRoute(e, httpexpect.Status2xx) utils.GetRouteList(e, http.StatusOK) - utils.WaitUntilMethodSucceed(e, http.MethodGet, 1) - utils.TestPrometheusEtcdMetric(e, 1) + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) }) // get default diff --git a/t/chaos/killetcd/killetcd.go b/t/chaos/killetcd/killetcd.go index bb75db8abcea..b9d6d054f830 100644 --- a/t/chaos/killetcd/killetcd.go +++ b/t/chaos/killetcd/killetcd.go @@ -64,7 +64,8 @@ func getEtcdKillChaos() *v1alpha1.PodChaos { var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) - eSilent := utils.GetSilentHttpexpectClient() + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) var cliSet *utils.ClientSet var apisixPod *v1.Pod @@ -89,8 +90,8 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { utils.SetRoute(e, 
httpexpect.Status2xx) utils.GetRouteList(e, http.StatusOK) - utils.WaitUntilMethodSucceed(e, http.MethodGet, 1) - utils.TestPrometheusEtcdMetric(e, 1) + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) }) ginkgo.It("run request in background", func() { @@ -99,7 +100,7 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { for { go func() { defer ginkgo.GinkgoRecover() - utils.GetRoute(eSilent, http.StatusOK) + utils.GetRoute(eDataPanel, http.StatusOK) }() time.Sleep(100 * time.Millisecond) stopLoop := false @@ -119,7 +120,7 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { ginkgo.It("get stats before kill etcd", func() { timeStart := time.Now() - bandwidthBefore, durationBefore = utils.GetEgressBandwidthPerSecond(e) + bandwidthBefore, durationBefore = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsBefore = bandwidthBefore / durationBefore gomega.Expect(bpsBefore).NotTo(gomega.BeZero()) @@ -141,10 +142,10 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { ginkgo.It("get stats after kill etcd", func() { timeStart := time.Now() utils.SetRoute(e, httpexpect.Status5xx) - utils.GetRoute(e, http.StatusOK) - utils.TestPrometheusEtcdMetric(e, 0) + utils.GetRoute(eDataPanel, http.StatusOK) + utils.TestPrometheusEtcdMetric(ePrometheus, 0) - bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(e) + bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsAfter = bandwidthAfter / durationAfter errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) diff --git a/t/chaos/kubernetes/deployment.yaml b/t/chaos/kubernetes/deployment.yaml index 0413db47b664..3076f9c0bd5d 100644 --- a/t/chaos/kubernetes/deployment.yaml +++ b/t/chaos/kubernetes/deployment.yaml @@ -77,7 +77,7 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.namespace - image: "apache/apisix:latest" + image: 
"apache/apisix:alpine-local" imagePullPolicy: IfNotPresent name: apisix-gw-deployment ports: @@ -87,6 +87,9 @@ spec: - containerPort: 9443 name: https protocol: TCP + - containerPort: 9180 + name: admin-port + protocol: TCP readinessProbe: failureThreshold: 6 initialDelaySeconds: 10 diff --git a/t/chaos/kubernetes/service.yaml b/t/chaos/kubernetes/service.yaml index c4406f58728d..f0ffdae60463 100644 --- a/t/chaos/kubernetes/service.yaml +++ b/t/chaos/kubernetes/service.yaml @@ -32,10 +32,10 @@ spec: port: 9443 protocol: TCP targetPort: 9443 - # - name: admin-port - # port: 9180 - # protocol: TCP - # targetPort: 9180 + - name: admin-port + port: 9180 + protocol: TCP + targetPort: 9180 selector: app: apisix-gw type: NodePort diff --git a/t/chaos/utils/Dockerfile b/t/chaos/utils/Dockerfile index 700108283799..3eecfd580a7a 100644 --- a/t/chaos/utils/Dockerfile +++ b/t/chaos/utils/Dockerfile @@ -67,7 +67,7 @@ RUN mkdir -p logs && touch logs/access.log && touch logs/error.log \ ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin -EXPOSE 9080 9443 +EXPOSE 9080 9180 9443 CMD ["sh", "-c", "/usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'"] diff --git a/t/chaos/utils/setup_chaos_utils.sh b/t/chaos/utils/setup_chaos_utils.sh index 9c08fed04052..4b41bb6e3fd8 100755 --- a/t/chaos/utils/setup_chaos_utils.sh +++ b/t/chaos/utils/setup_chaos_utils.sh @@ -34,19 +34,23 @@ modify_config() { DNS_IP=$(kubectl get svc -n kube-system -l k8s-app=kube-dns -o 'jsonpath={..spec.clusterIP}') echo "dns_resolver: - ${DNS_IP} -etcd: - host: - - \"http://etcd.default.svc.cluster.local:2379\" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - \"http://etcd.default.svc.cluster.local:2379\" plugin_attr: prometheus: enable_export_server: false " > ./conf/config.yaml - sed -i -e 's/apisix:latest/apisix:alpine-local/g' 
t/chaos/kubernetes/deployment.yaml } port_forward() { apisix_pod_name=$(kubectl get pod -l app=apisix-gw -o 'jsonpath={.items[0].metadata.name}') nohup kubectl port-forward svc/apisix-gw-lb 9080:9080 >/dev/null 2>&1 & + nohup kubectl port-forward svc/apisix-gw-lb 9180:9180 >/dev/null 2>&1 & nohup kubectl port-forward $apisix_pod_name 9091:9091 >/dev/null 2>&1 & ps aux | grep '[p]ort-forward' } diff --git a/t/chaos/utils/utils.go b/t/chaos/utils/utils.go index e4db50b329b6..207a7b9c10ad 100644 --- a/t/chaos/utils/utils.go +++ b/t/chaos/utils/utils.go @@ -30,9 +30,13 @@ import ( ) var ( - token = "edd1c9f034335f136f87ad84b625c8f1" - Host = "http://127.0.0.1:9080" - setRouteBody = `{ + token = "edd1c9f034335f136f87ad84b625c8f1" + // TODO: refactor the code. We should move the endpoint from the expect to the http call. + // So we don't need to remember to pass the correct expect. + Host = "http://127.0.0.1:9180" + DataPanelHost = "http://127.0.0.1:9080" + PrometheusHost = "http://127.0.0.1:9080" + setRouteBody = `{ "uri": "/get", "plugins": { "prometheus": {} @@ -168,11 +172,11 @@ func DeleteRoute(e *httpexpect.Expect) *httpexpect.Response { func SetPrometheusMetricsPublicAPI(e *httpexpect.Expect) *httpexpect.Response { return caseCheck(httpTestCase{ - E: e, - Method: http.MethodPut, - Path: "/apisix/admin/routes/metrics", - Headers: map[string]string{"X-API-KEY": token}, - Body: `{ + E: e, + Method: http.MethodPut, + Path: "/apisix/admin/routes/metrics", + Headers: map[string]string{"X-API-KEY": token}, + Body: `{ "uri": "/apisix/prometheus/metrics", "plugins": { "public-api": {} diff --git a/t/cli/test_access_log.sh b/t/cli/test_access_log.sh index 252a931d2692..ad48dcb4c865 100755 --- a/t/cli/test_access_log.sh +++ b/t/cli/test_access_log.sh @@ -57,7 +57,7 @@ if [ $count_test_access_log -eq 0 ]; then fi count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -eq 4 ]; then +if [ $count_access_log_off -eq 5 ]; then echo 
"failed: nginx.conf file find access_log off; when enable access log" exit 1 fi @@ -92,7 +92,7 @@ if [ $count_test_access_log -eq 1 ]; then fi count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -ne 4 ]; then +if [ $count_access_log_off -ne 5 ]; then echo "failed: nginx.conf file doesn't find access_log off; when disable access log" exit 1 fi @@ -151,7 +151,7 @@ rm logs/error.log make init make run -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! $code -eq 200 ]; then @@ -191,7 +191,8 @@ apisix: admin_api_mtls: admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 + admin_listen: + port: 9180 https_admin: true nginx_config: http: diff --git a/t/cli/test_admin.sh b/t/cli/test_admin.sh index 789a61fb8941..960975417b61 100755 --- a/t/cli/test_admin.sh +++ b/t/cli/test_admin.sh @@ -28,7 +28,8 @@ apisix: admin_api_mtls: admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 + admin_listen: + port: 9180 https_admin: true " > conf/config.yaml @@ -169,10 +170,11 @@ fi echo "pass: show WARNING message if the user used default token and allow any IP to access" -# port_admin set +# admin_listen set echo ' apisix: - port_admin: 9180 + admin_listen: + port: 9180 ' > conf/config.yaml rm logs/error.log @@ -192,7 +194,7 @@ if grep -E 'using uninitialized ".+" variable while logging request' logs/error. 
exit 1 fi -echo "pass: uninitialized variable not found during writing access log (port_admin set)" +echo "pass: uninitialized variable not found during writing access log (admin_listen set)" # Admin API can only be used with etcd config_center echo ' @@ -248,7 +250,7 @@ make init make run # initialize node-status public API routes #1 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/node-status \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/apisix/status\", @@ -275,7 +277,7 @@ make init sleep 1 # initialize node-status public API routes #2 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/node-status \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/apisix/status\", diff --git a/t/cli/test_admin_mtls.sh b/t/cli/test_admin_mtls.sh index 7bdb06e431c3..881530a66163 100755 --- a/t/cli/test_admin_mtls.sh +++ b/t/cli/test_admin_mtls.sh @@ -23,7 +23,8 @@ echo ' apisix: - port_admin: 9180 + admin_listen: + port: 9180 https_admin: true admin_api_mtls: diff --git a/t/cli/test_apisix_mirror.sh b/t/cli/test_apisix_mirror.sh index b0547ca265c1..f54d7ddfdd3d 100755 --- a/t/cli/test_apisix_mirror.sh +++ b/t/cli/test_apisix_mirror.sh @@ -32,7 +32,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { "nodes": { diff --git a/t/cli/test_ci_only.sh b/t/cli/test_ci_only.sh index a440cf255ac2..d7d9f5bd1bbc 100755 --- a/t/cli/test_ci_only.sh +++ 
b/t/cli/test_ci_only.sh @@ -26,10 +26,14 @@ git checkout conf/config.yaml echo ' -etcd: - host: - - "http://127.0.0.1:3379" - prefix: "/apisix" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:3379" + prefix: "/apisix" ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_deployment_control_plane.sh b/t/cli/test_deployment_control_plane.sh new file mode 100755 index 000000000000..fa7210378da4 --- /dev/null +++ b/t/cli/test_deployment_control_plane.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + cert: t/certs/mtls_server.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep 'property "cert_key" is required'; then + echo "failed: should check deployment schema during init" + exit 1 +fi + +echo "passed: should check deployment schema during init" + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +apisix: + enable_admin: false +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: control_plane should enable Admin API" + exit 1 +fi + +echo "passed: control_plane should enable Admin API" + +# use https +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') + +if [ ! 
$code -eq 200 ]; then + make stop + echo "failed: could not work with etcd" + exit 1 +fi + +echo "passed: work well with etcd in control plane" + +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/c -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop +if [ ! $code -eq 404 ]; then + echo "failed: should disable request proxy" + exit 1 +fi + +echo "passed: should disable request proxy" diff --git a/t/cli/test_deployment_data_plane.sh b/t/cli/test_deployment_data_plane.sh new file mode 100755 index 000000000000..ef5ef61e37c6 --- /dev/null +++ b/t/cli/test_deployment_data_plane.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +# clean etcd data +etcdctl del / --prefix + +# data_plane does not write data to etcd +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 + tls: + verify: false +' > conf/config.yaml + +make run + +sleep 1 + +res=$(etcdctl get / --prefix | wc -l) + +if [ ! $res -eq 0 ]; then + echo "failed: data_plane should not write data to etcd" + exit 1 +fi + +echo "passed: data_plane does not write data to etcd" + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! $code -eq 404 ]; then + echo "failed: data_plane should not enable Admin API" + exit 1 +fi + +echo "passed: data_plane should not enable Admin API" + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 +' > conf/config.yaml + +out=$(make run 2>&1 || true) +make stop +if ! echo "$out" | grep 'failed to load the configuration: https://127.0.0.1:12379: certificate verify failed'; then + echo "failed: should verify certificate by default" + exit 1 +fi + +echo "passed: should verify certificate by default" diff --git a/t/cli/test_deployment_mtls.sh b/t/cli/test_deployment_mtls.sh new file mode 100755 index 000000000000..5fa4c6984a21 --- /dev/null +++ b/t/cli/test_deployment_mtls.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +exit_if_not_customed_nginx + +# use mTLS +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: admin.apisix.dev:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! 
$code -eq 200 ]; then + echo "failed: could not work with etcd" + exit 1 +fi + +echo "passed: work well with etcd in control plane" + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: control_plane + control_plane: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + timeout: 30 + tls: + verify: false + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt +' > conf/config.yaml + +rm logs/error.log +make run +sleep 1 + +make stop + +if grep '\[error\] .\+ https://admin.apisix.dev:22379' logs/error.log; then + echo "failed: work well with control plane in data plane" + exit 1 +fi + +echo "passed: work well with control plane in data plane" diff --git a/t/cli/test_deployment_traditional.sh b/t/cli/test_deployment_traditional.sh index f6d7d62c981b..1dead769bc10 100755 --- a/t/cli/test_deployment_traditional.sh +++ b/t/cli/test_deployment_traditional.sh @@ -19,21 +19,6 @@ . ./t/cli/common.sh -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep 'invalid deployment traditional configuration: property "etcd" is required'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - # HTTP echo ' deployment: @@ -49,7 +34,7 @@ deployment: make run sleep 1 -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! 
$code -eq 200 ]; then @@ -77,7 +62,7 @@ deployment: make run sleep 1 -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') make stop if [ ! $code -eq 200 ]; then @@ -104,6 +89,7 @@ deployment: make run sleep 1 +make stop if grep '\[error\]' logs/error.log; then echo "failed: could not connect to etcd with stream enabled" @@ -111,3 +97,75 @@ if grep '\[error\]' logs/error.log; then fi echo "passed: could connect to etcd" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + - https://127.0.0.1:2379 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'all nodes in the etcd cluster should enable/disable TLS together'; then + echo "failed: should validate etcd host" + exit 1 +fi + +echo "passed: validate etcd host" + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns + +# etcd mTLS verify +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false + ' > conf/config.yaml + +make run +sleep 1 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: could not work when mTLS is enabled" + exit 1 +fi + +echo "passed: etcd enables mTLS successfully" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + verify: false + ' > conf/config.yaml + +out=$(make init 2>&1 || echo "ouch") +if ! 
echo "$out" | grep "bad certificate"; then + echo "failed: apisix should echo \"bad certificate\"" + exit 1 +fi + +echo "passed: certificate verify fail expectedly" diff --git a/t/cli/test_etcd.sh b/t/cli/test_etcd.sh index c417baaec94d..033cab5beb0f 100755 --- a/t/cli/test_etcd.sh +++ b/t/cli/test_etcd.sh @@ -32,13 +32,17 @@ etcdctl --endpoints=127.0.0.1:2379 auth enable etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix echo ' -etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - user: root - password: apache-api6 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api6 ' > conf/config.yaml make init @@ -52,10 +56,10 @@ init_kv=( "/apisix/global_rules/ init_dir" "/apisix/plugin_metadata/ init_dir" "/apisix/plugins/ init_dir" -"/apisix/proto/ init_dir" +"/apisix/protos/ init_dir" "/apisix/routes/ init_dir" "/apisix/services/ init_dir" -"/apisix/ssl/ init_dir" +"/apisix/ssls/ init_dir" "/apisix/stream_routes/ init_dir" "/apisix/upstreams/ init_dir" ) @@ -84,10 +88,14 @@ echo "passed: properly handle the error when connecting to etcd without auth" git checkout conf/config.yaml echo ' -etcd: - host: - - http://127.0.0.1:2389 - prefix: /apisix +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -102,10 +110,14 @@ echo "passed: Show retry time info successfully" git checkout conf/config.yaml echo ' -etcd: - host: - - http://127.0.0.1:2389 - prefix: /apisix +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -129,13 +141,17 @@ etcdctl --endpoints=127.0.0.1:2379 auth enable etcdctl --endpoints=127.0.0.1:2379 
--user=root:apache-api6 del /apisix --prefix echo ' -etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - user: root - password: apache-api7 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api7 ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_etcd_healthcheck.sh b/t/cli/test_etcd_healthcheck.sh index 34ca4d29a632..52b90bc908d2 100755 --- a/t/cli/test_etcd_healthcheck.sh +++ b/t/cli/test_etcd_healthcheck.sh @@ -30,11 +30,15 @@ if [ -z "logs/error.log" ]; then fi echo ' -etcd: - host: - - "http://127.0.0.1:23790" - - "http://127.0.0.1:23791" - - "http://127.0.0.1:23792" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:23790" + - "http://127.0.0.1:23791" + - "http://127.0.0.1:23792" health_check_timeout: '"$HEALTH_CHECK_RETRY_TIMEOUT"' timeout: 2 ' > conf/config.yaml @@ -45,7 +49,7 @@ docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d make init && make run docker stop ${ETCD_NAME_0} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! $code -eq 200 ]; then echo "failed: apisix got effect when one etcd node out of a cluster disconnected" exit 1 @@ -53,7 +57,7 @@ fi docker start ${ETCD_NAME_0} docker stop ${ETCD_NAME_1} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! 
$code -eq 200 ]; then echo "failed: apisix got effect when one etcd node out of a cluster disconnected" exit 1 @@ -71,7 +75,7 @@ docker stop ${ETCD_NAME_0} && docker stop ${ETCD_NAME_1} && docker stop ${ETCD_N sleep_till=$(date +%s -d "$DATE + $HEALTH_CHECK_RETRY_TIMEOUT second") -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ $code -eq 200 ]; then echo "failed: apisix not got effect when all etcd nodes disconnected" exit 1 @@ -86,7 +90,7 @@ if [ "$sleep_seconds" -gt 0 ]; then sleep $sleep_seconds fi -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! 
$code -eq 200 ]; then echo "failed: apisix could not recover when etcd node recover" docker ps diff --git a/t/cli/test_etcd_mtls.sh b/t/cli/test_etcd_mtls.sh index 371330e939a2..d61d6d517c1f 100755 --- a/t/cli/test_etcd_mtls.sh +++ b/t/cli/test_etcd_mtls.sh @@ -25,14 +25,18 @@ exit_if_not_customed_nginx # etcd mTLS verify echo ' -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -44,12 +48,16 @@ fi echo "passed: certificate verify success expectedly" echo ' -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + verify: false ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -65,13 +73,17 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -95,13 +107,17 @@ apisix: - addr: 9100 ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + 
config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml out=$(make init 2>&1 || echo "ouch") @@ -132,13 +148,17 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key ' > conf/config.yaml rm logs/error.log || true @@ -147,7 +167,7 @@ make run sleep 1 make stop -if ! grep -E 'certificate host mismatch' logs/error.log; then +if ! grep -E 'upstream SSL certificate does not match \"127.0.0.1\" while SSL handshaking to upstream' logs/error.log; then echo "failed: should got certificate host mismatch when use host in etcd.host as sni" exit 1 fi @@ -161,14 +181,18 @@ echo ' apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - sni: "admin.apisix.dev" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + sni: "admin.apisix.dev" ' > conf/config.yaml rm logs/error.log || true @@ -183,4 +207,3 @@ if grep -E 'certificate host mismatch' logs/error.log; then fi echo "passed: specify custom sni instead of using etcd.host" - diff --git a/t/cli/test_etcd_tls.sh b/t/cli/test_etcd_tls.sh index 906a2b91d1b2..39db833f9674 100755 --- a/t/cli/test_etcd_tls.sh +++ b/t/cli/test_etcd_tls.sh @@ -27,10 +27,17 @@ git checkout conf/config.yaml echo ' -etcd: - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" 
+apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -46,12 +53,16 @@ echo "passed: Show certificate verify failed info successfully" git checkout conf/config.yaml echo ' -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false - prefix: "/apisix" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" + tls: + verify: false ' > conf/config.yaml out=$(make init 2>&1 || true) diff --git a/t/cli/test_http_config.sh b/t/cli/test_http_config.sh index 20837f65a069..4059ca69a712 100755 --- a/t/cli/test_http_config.sh +++ b/t/cli/test_http_config.sh @@ -39,22 +39,6 @@ echo "passed: define custom shdict" git checkout conf/config.yaml -echo ' -nginx_config: - http: - lua_shared_dicts: - my_dict: 1m -' > conf/config.yaml - -make init - -if ! 
grep "lua_shared_dict my_dict 1m;" conf/nginx.conf > /dev/null; then - echo "failed: define custom shdict in the old way" - exit 1 -fi - -echo "passed: define custom shdict in the old way" - echo " plugins: - ip-restriction diff --git a/t/cli/test_main.sh b/t/cli/test_main.sh index ea54c53b8425..6a0358405889 100755 --- a/t/cli/test_main.sh +++ b/t/cli/test_main.sh @@ -59,7 +59,9 @@ echo "passed: nginx.conf file contains reuseport configuration" echo " apisix: ssl: - listen_port: 8443 + listen: + - port: 8443 + " > conf/config.yaml make init @@ -87,10 +89,11 @@ apisix: - 9081 - 9082 ssl: - listen_port: - - 9443 - - 9444 - - 9445 + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 " > conf/config.yaml make init @@ -253,9 +256,13 @@ echo "passed: resolve variables wrapped with whitespace" # support environment variables in local_conf echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" ' > conf/config.yaml ETCD_HOST=127.0.0.1 ETCD_PORT=2379 make init @@ -267,9 +274,13 @@ fi # don't override user's envs configuration echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" nginx_config: envs: - ETCD_HOST @@ -288,9 +299,13 @@ if ! 
grep "env ETCD_HOST;" conf/nginx.conf > /dev/null; then fi echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" nginx_config: envs: - ETCD_HOST=1.1.1.1 @@ -514,7 +529,8 @@ apisix: admin_api_mtls: admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' - port_admin: 9180 + admin_listen: + port: 9180 https_admin: true " > conf/customized_config.yaml diff --git a/t/cli/test_makefile.sh b/t/cli/test_makefile.sh new file mode 100755 index 000000000000..5b1ecd712ada --- /dev/null +++ b/t/cli/test_makefile.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +make run + +echo " +apisix: + enable_admin: true + admin_listen: + ip: 127.0.0.2 + port: 9181 +" > conf/config.yaml + +make reload +make stop + +if ! 
grep "listen 127.0.0.2:9181;" conf/nginx.conf > /dev/null; then + echo "failed: regenerate nginx conf in 'make reload'" + exit 1 +fi + +echo "passed: regenerate nginx conf in 'make reload'" diff --git a/t/cli/test_prometheus.sh b/t/cli/test_prometheus.sh index eb4ce0300193..15f54f9114ee 100755 --- a/t/cli/test_prometheus.sh +++ b/t/cli/test_prometheus.sh @@ -77,7 +77,7 @@ plugin_attr: IP=127.0.0.1 PORT=9092 make run # initialize prometheus metrics public API route #1 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/metrics1 \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics1 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/prometheus/metrics\", @@ -152,7 +152,7 @@ plugin_attr: IP=127.0.0.1 PORT=9092 make run # initialize prometheus metrics public API route #2 -code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9080/apisix/admin/routes/metrics2 \ +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics2 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d "{ \"uri\": \"/apisix/prometheus/metrics\", diff --git a/t/cli/test_prometheus_stream.sh b/t/cli/test_prometheus_stream.sh index 347774b2769c..561b9a820cf5 100755 --- a/t/cli/test_prometheus_stream.sh +++ b/t/cli/test_prometheus_stream.sh @@ -34,7 +34,7 @@ stream_plugins: make run sleep 0.5 -curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9080/apisix/admin/stream_routes/1 \ +curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" \ -d '{ "plugins": { diff --git a/t/cli/test_snippet.sh b/t/cli/test_snippet.sh index 0684d6c1f659..ad55151f1c11 100755 --- a/t/cli/test_snippet.sh +++ b/t/cli/test_snippet.sh @@ -25,7 +25,8 @@ echo ' apisix: node_listen: 9080 enable_admin: true - 
port_admin: 9180 + admin_listen: + port: 9180 stream_proxy: only: false tcp: diff --git a/t/cli/test_standalone.sh b/t/cli/test_standalone.sh index b4e6f3955420..1a66c5bf8c43 100755 --- a/t/cli/test_standalone.sh +++ b/t/cli/test_standalone.sh @@ -65,3 +65,46 @@ if [ ! $code -eq 200 ]; then fi echo "passed: resolve variables in apisix.yaml conf success" + +# configure standalone via deployment +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes) +if [ ! $code -eq 404 ]; then + echo "failed: admin API should be disabled automatically" + exit 1 +fi + +echo "passed: admin API should be disabled automatically" + +# support environment variables +echo ' +routes: + - + uri: ${{var_test_path}} + plugins: + proxy-rewrite: + uri: ${{var_test_proxy_rewrite_uri:=/apisix/nginx_status}} + upstream: + nodes: + "127.0.0.1:9091": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! 
$code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" + exit 1 +fi + +echo "passed: resolve variables in apisix.yaml conf success" diff --git a/t/cli/test_tls_over_tcp.sh b/t/cli/test_tls_over_tcp.sh index a5a095a52839..566af9418a24 100755 --- a/t/cli/test_tls_over_tcp.sh +++ b/t/cli/test_tls_over_tcp.sh @@ -41,7 +41,7 @@ sleep 0.1 ./utils/create-ssl.py t/certs/mtls_server.crt t/certs/mtls_server.key test.com -curl -k -i http://127.0.0.1:9080/apisix/admin/stream_routes/1 \ +curl -k -i http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d \ '{"upstream":{"nodes":{"127.0.0.1:9101":1},"type":"roundrobin"}}' diff --git a/t/cli/test_upstream_mtls.sh b/t/cli/test_upstream_mtls.sh index a8de39733070..b2b53cfab51d 100755 --- a/t/cli/test_upstream_mtls.sh +++ b/t/cli/test_upstream_mtls.sh @@ -54,7 +54,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream": { @@ -73,7 +73,7 @@ curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034 } }' -sleep 0.1 +sleep 1 code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) @@ -117,7 +117,7 @@ make init make run sleep 0.1 -curl -k -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/hello", "upstream": { diff --git a/t/cli/test_validate_config.sh b/t/cli/test_validate_config.sh index 216f1d9fb14d..2fe5d40666ae 100755 --- a/t/cli/test_validate_config.sh +++ b/t/cli/test_validate_config.sh @@ -78,7 +78,8 @@ echo ' apisix: node_listen: 9080 enable_admin: true - port_admin: 9180 + 
admin_listen: + port: 9180 stream_proxy: tcp: - "localhost:9100" @@ -204,9 +205,13 @@ fi echo "passed: check the realip configuration for batch-requests" echo ' -etcd: +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: host: - - 127.0.0.1 + - 127.0.0.1 ' > conf/config.yaml out=$(make init 2>&1 || true) @@ -215,17 +220,4 @@ if ! echo "$out" | grep 'property "host" validation failed'; then exit 1 fi -echo ' -etcd: - prefix: "/apisix/" - host: - - https://127.0.0.1 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep 'property "prefix" validation failed'; then - echo "failed: should check etcd schema during init" - exit 1 -fi - echo "passed: check etcd schema during init" diff --git a/t/config-center-yaml/plugin-configs.t b/t/config-center-yaml/plugin-configs.t index e7a22b7ba074..ab291d65e877 100644 --- a/t/config-center-yaml/plugin-configs.t +++ b/t/config-center-yaml/plugin-configs.t @@ -113,7 +113,7 @@ routes: --- request GET /echo --- response_body -hello +world --- response_headers in: out --- error_log eval diff --git a/t/config-center-yaml/plugin-metadata.t b/t/config-center-yaml/plugin-metadata.t index 0ad0c6c088e4..6e0a9971e879 100644 --- a/t/config-center-yaml/plugin-metadata.t +++ b/t/config-center-yaml/plugin-metadata.t @@ -33,7 +33,7 @@ _EOC_ $block->set_value("yaml_config", $yaml_config); - if (!$block->no_error_log) { + if (!$block->no_error_log && !$block->error_log) { $block->set_value("no_error_log", "[error]"); } }); @@ -67,3 +67,25 @@ plugin_metadata: GET /hello --- error_log "remote_addr":"127.0.0.1" + + + +=== TEST 2: sanity +--- apisix_yaml +upstreams: + - id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +routes: + - + uri: /hello + upstream_id: 1 +plugin_metadata: + - id: authz-casbin + model: 123 +#END +--- request +GET /hello +--- error_log +failed to check item data of [plugin_metadata] diff --git a/t/config-center-yaml/ssl.t b/t/config-center-yaml/ssl.t index 
d4745a21f0ba..8d74faff0100 100644 --- a/t/config-center-yaml/ssl.t +++ b/t/config-center-yaml/ssl.t @@ -108,7 +108,7 @@ __DATA__ === TEST 1: sanity --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- @@ -181,7 +181,7 @@ server name: "test.com" === TEST 2: single sni --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- @@ -252,7 +252,7 @@ server name: "test.com" === TEST 3: bad cert --- apisix_yaml -ssl: +ssls: - cert: | -----BEGIN CERTIFICATE----- diff --git a/t/config-center-yaml/stream-route.t b/t/config-center-yaml/stream-route.t index b6bfabff9aa3..df28a3c0f276 100644 --- a/t/config-center-yaml/stream-route.t +++ b/t/config-center-yaml/stream-route.t @@ -113,9 +113,6 @@ stream_routes: mqtt-proxy: protocol_name: "MQTT" protocol_level: 4 - upstream: - ip: "127.0.0.1" - port: 1995 upstreams: - nodes: "127.0.0.1:1995": 1 diff --git a/t/control/plugin-metadata.t b/t/control/plugin-metadata.t new file mode 100644 index 000000000000..21e784186b22 --- /dev/null +++ b/t/control/plugin-metadata.t @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val", + "ikey": 1 + }]] + ) + if code >= 300 then + ngx.status = code + return + end + + local code = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[ + {"log_format": {"upstream_response_time": "$upstream_response_time"}} + ]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } +--- error_code: 200 + + + +=== TEST 2: dump all plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadatas', ngx.HTTP_GET) + local json = require("toolkit.json") + res = json.decode(res) + for _, metadata in ipairs(res) do + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + elseif metadata.id == "example-plugin" then + ngx.say("check skey: ", metadata.skey == "val") + ngx.say("check ikey: ", metadata.ikey == 1) + end + end + } + } +--- response_body +check log_format: true +check skey: true +check ikey: true + + + +=== TEST 3: dump file-logger metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadata/file-logger', ngx.HTTP_GET) + local json = require("toolkit.json") + metadata = json.decode(res) + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + end 
+ } + } +--- response_body +check log_format: true + + + +=== TEST 4: plugin without metadata +--- request +GET /v1/plugin_metadata/batch-requests +--- error_code: 404 +--- response_body +{"error_msg":"plugin metadata[batch-requests] not found"} diff --git a/t/control/schema.t b/t/control/schema.t index 5b3c7799720c..ae9c676d7591 100644 --- a/t/control/schema.t +++ b/t/control/schema.t @@ -69,7 +69,11 @@ __DATA__ "schema": { "type":"object", "properties": { - "disable": {"type": "boolean"} + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } } }, "metadata_schema": {"type":"object"} @@ -84,7 +88,11 @@ __DATA__ "schema": { "type":"object", "properties": { - "disable": {"type": "boolean"} + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } } }, "priority": 1000 diff --git a/t/core/config.t b/t/core/config.t index b87fe1224e0c..29d1cc52dc07 100644 --- a/t/core/config.t +++ b/t/core/config.t @@ -55,12 +55,15 @@ first plugin: "real-ip" } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" # etcd address - prefix: "/apisix" # apisix configurations prefix - timeout: 1 - +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 plugins: - example-plugin diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t index 0d6a77989686..5c1d590a14f5 100644 --- a/t/core/config_etcd.t +++ b/t/core/config_etcd.t @@ -29,10 +29,15 @@ __DATA__ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:7777" -- wrong etcd port - timeout: 1 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - "http://127.0.0.1:7777" -- wrong etcd port + timeout: 1 --- config location /t { content_by_lua_block { @@ -54,9 +59,15 @@ qr/(connection refused){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - 
"https://127.0.0.1:2379" + ssl: + ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:2379" --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -73,9 +84,9 @@ end --- request GET /t --- grep_error_log chop -handshake failed +peer closed connection in SSL handshake while SSL handshaking to upstream --- grep_error_log_out eval -qr/(handshake failed){1,}/ +qr/(peer closed connection in SSL handshake while SSL handshaking to upstream){1,}/ @@ -83,9 +94,13 @@ qr/(handshake failed){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:12379" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:12379" --- config location /t { content_by_lua_block { @@ -107,9 +122,15 @@ qr/(closed){1,}/ --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "https://127.0.0.1:12379" + ssl: + ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -126,9 +147,9 @@ end --- request GET /t --- grep_error_log chop -18: self signed certificate +10:certificate has expired --- grep_error_log_out eval -qr/(18: self signed certificate){1,}/ +qr/(10:certificate has expired){1,}/ @@ -137,11 +158,15 @@ qr/(18: self signed certificate){1,}/ apisix: node_listen: 1984 admin_key: null -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false --- config location /t { content_by_lua_block { @@ -159,9 +184,8 @@ etcd: "desc": "new 
route", "uri": "/index.html" }]] - ) + ) - ngx.status = code ngx.say(body) } } @@ -179,11 +203,15 @@ passed apisix: node_listen: 1984 admin_key: null -etcd: - host: - - "https://127.0.0.1:12379" - tls: - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false --- config location /t { content_by_lua_block { @@ -210,12 +238,16 @@ passed --- yaml_config apisix: node_listen: 1984 -etcd: - host: - - "http://127.0.0.1:1980" -- fake server port - timeout: 1 - user: root # root username for etcd - password: 5tHkHhYkjr6cQY # root password for etcd +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:1980" -- fake server port + timeout: 1 + user: root # root username for etcd + password: 5tHkHhYkjr6cQY # root password for etcd --- extra_init_by_lua local health_check = require("resty.etcd.health_check") health_check.get_target_status = function() @@ -248,7 +280,7 @@ etcd auth failed local config = core.config.new() local res = config:getkey("/routes/") if res and res.status == 200 and res.body - and res.body.node and res.body.node.key == "/apisix/routes" then + and res.body.count and tonumber(res.body.count) >= 1 then ngx.say("passed") else ngx.say("failed") diff --git a/t/core/config_util.t b/t/core/config_util.t index 80f01a5d2ae4..2b012fc97be0 100644 --- a/t/core/config_util.t +++ b/t/core/config_util.t @@ -70,3 +70,42 @@ __DATA__ end } } + + + +=== TEST 2: add_clean_handler / cancel_clean_handler / fire_all_clean_handlers +--- config + location /t { + content_by_lua_block { + local util = require("apisix.core.config_util") + local function setup() + local item = {clean_handlers = {}} + local idx1 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire one") + end) + local idx2 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire two") + end) + return item, idx1, idx2 + end + + 
local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1, true) + util.cancel_clean_handler(item, idx2, true) + + local item, idx1, idx2 = setup() + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx2) + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1) + util.fire_all_clean_handlers(item) + } + } +--- grep_error_log eval +qr/fire \w+/ +--- grep_error_log_out eval +"fire one\nfire two\n" x 3 diff --git a/t/core/etcd-auth-fail.t b/t/core/etcd-auth-fail.t index 708b1d243e34..c85f660dc06d 100644 --- a/t/core/etcd-auth-fail.t +++ b/t/core/etcd-auth-fail.t @@ -62,7 +62,6 @@ __DATA__ } --- request GET /t ---- error_code: 500 --- error_log eval qr /insufficient credentials code: 401/ @@ -80,12 +79,16 @@ qr /insufficient credentials code: 401/ } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 --- request GET /t --- error_log eval diff --git a/t/core/etcd-auth.t b/t/core/etcd-auth.t index f2f322db9b47..448893b264ec 100644 --- a/t/core/etcd-auth.t +++ b/t/core/etcd-auth.t @@ -85,12 +85,16 @@ test_value } } --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 --- request GET /t --- no_error_log diff --git a/t/core/etcd-mtls.t b/t/core/etcd-mtls.t index a004aef04711..05b3121f9ffc 100644 --- a/t/core/etcd-mtls.t +++ b/t/core/etcd-mtls.t @@ -24,7 +24,6 @@ if ($out !~ m/function:/) { plan('no_plan'); } - add_block_preprocessor(sub { my ($block) = @_; @@ -39,14 +38,18 @@ __DATA__ 
=== TEST 1: run etcd in init phase --- yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- init_by_lua_block local apisix = require("apisix") apisix.http_init() @@ -90,14 +93,18 @@ init_by_lua:26: 404 === TEST 2: run etcd in init phase (stream) --- yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- stream_init_by_lua_block apisix = require("apisix") apisix.stream_init() @@ -140,14 +147,18 @@ init_by_lua:26: 404 === TEST 3: sync --- extra_yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false --- config location /t { content_by_lua_block { @@ -196,14 +207,18 @@ waitdir key === TEST 4: sync (stream) --- extra_yaml_config -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: 
t/certs/mtls_client.key + verify: false --- stream_server_config content_by_lua_block { local core = require("apisix.core") @@ -245,13 +260,17 @@ waitdir key apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt -etcd: - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key --- init_by_lua_block local apisix = require("apisix") apisix.http_init() diff --git a/t/core/etcd-sync.t b/t/core/etcd-sync.t index a1e674218f91..28a89b21f6cb 100644 --- a/t/core/etcd-sync.t +++ b/t/core/etcd-sync.t @@ -24,9 +24,13 @@ __DATA__ === TEST 1: minus timeout to watch repeatedly --- extra_yaml_config -etcd: - host: - - "http://127.0.0.1:2379" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" --- config location /t { content_by_lua_block { diff --git a/t/core/etcd.t b/t/core/etcd.t index 8d1bdc958920..1b8f25db4ab2 100644 --- a/t/core/etcd.t +++ b/t/core/etcd.t @@ -415,3 +415,23 @@ qr/init_by_lua:\d+: \S+/ init_by_lua:12: ab init_by_lua:19: 200 init_by_lua:26: 404 + + + +=== TEST 8: error handling in server_version +--- config + location /t { + content_by_lua_block { + local etcd_lib = require("resty.etcd") + etcd_lib.new = function() + return nil, "ouch" + end + local etcd = require("apisix.core.etcd") + local res, err = etcd.server_version() + ngx.say(err) + } + } +--- request +GET /t +--- response_body +ouch diff --git a/t/core/os.t b/t/core/os.t index dff6c8b3c191..4c99b311af5d 100644 --- a/t/core/os.t +++ b/t/core/os.t @@ -70,3 +70,22 @@ A false false false + + + +=== TEST 3: usleep, bad arguments +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + for _, c in ipairs({ + {us = 0.1}, + 
}) do + local ok = pcall(core.os.usleep, c.us) + ngx.say(ok) + end + } + } +--- response_body +false diff --git a/t/core/profile.t b/t/core/profile.t index 663dcf1be457..3e28f9706428 100644 --- a/t/core/profile.t +++ b/t/core/profile.t @@ -32,3 +32,21 @@ __DATA__ --- request GET /t --- error_code: 404 + + + +=== TEST 2: set env "APISIX_PROFILE" to Empty String +--- config + location /t { + content_by_lua_block { + local profile = require("apisix.core.profile") + profile.apisix_home = "./test/" + profile.profile = "" + local local_conf_path = profile:yaml_path("config") + ngx.say(local_conf_path) + } + } +--- request +GET /t +--- response_body +./test/conf/config.yaml diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t index d2f629d8edff..bbc1d7457437 100644 --- a/t/debug/debug-mode.t +++ b/t/debug/debug-mode.t @@ -43,9 +43,9 @@ done --- error_log loaded plugin and sort by priority: 23000 name: real-ip loaded plugin and sort by priority: 22000 name: client-control +loaded plugin and sort by priority: 12015 name: request-id loaded plugin and sort by priority: 12011 name: zipkin loaded plugin and sort by priority: 12000 name: ext-plugin-pre-req -loaded plugin and sort by priority: 11010 name: request-id loaded plugin and sort by priority: 11000 name: fault-injection loaded plugin and sort by priority: 10000 name: serverless-pre-function loaded plugin and sort by priority: 4000 name: cors @@ -321,12 +321,19 @@ passed "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "ip": "127.0.0.1", - "port": 1995 - } + "protocol_level": 4 } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] } }]] ) diff --git a/t/deployment/conf_server.t b/t/deployment/conf_server.t new file mode 100644 index 000000000000..cd5353e373f8 --- /dev/null +++ b/t/deployment/conf_server.t @@ -0,0 +1,447 @@ +# +# Licensed to the Apache Software Foundation (ASF) 
under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sync in https +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local consumers, _ = core.config.new("/consumers", { + automatic = true, + item_schema = core.schema.consumer, + }) + + ngx.sleep(0.6) + local idx = consumers.prev_index + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jobs", + "plugins": { + "basic-auth": { + "username": "jobs", + "password": "678901" + } + } + }]]) + + ngx.sleep(2) + local new_idx = consumers.prev_index + if new_idx > idx then + ngx.say("prev_index updated") + else + ngx.say("prev_index not update") + end + } + } +--- response_body +prev_index updated +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - https://127.0.0.1:12379 + tls: + verify: false + + + +=== TEST 2: mix ip 
& domain +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.2:2379 + - http://localhost:2379 + - http://[::1]:2379 +--- error_log +dns resolve localhost, result: +--- response_body +foo + + + +=== TEST 3: resolve domain, result changed +--- extra_init_by_lua + local resolver = require("apisix.core.resolver") + local old_f = resolver.parse_domain + local counter = 0 + resolver.parse_domain = function (domain) + if domain == "localhost" then + counter = counter + 1 + if counter % 2 == 0 then + return "127.0.0.2" + else + return "127.0.0.3" + end + else + return old_f(domain) + end + end +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + # use localhost so the connection is OK in the situation that the DNS + # resolve is not done in APISIX + - http://localhost:2379 +--- response_body +foo +--- error_log +localhost is resolved to: 127.0.0.3 +localhost is resolved to: 127.0.0.2 +--- no_error_log +[error] + + + +=== TEST 4: update balancer if the DNS result changed +--- extra_init_by_lua + local etcd = require("apisix.core.etcd") + etcd.switch_proxy = function () + return etcd.new() + end + + local resolver = require("apisix.core.resolver") + local old_f = resolver.parse_domain + package.loaded.counter = 0 + resolver.parse_domain = function (domain) + if domain == "x.com" then + local counter = package.loaded.counter + package.loaded.counter = 
counter + 1 + if counter % 2 == 0 then + return "127.0.0.2" + else + return "127.0.0.3" + end + else + return old_f(domain) + end + end + + local picker = require("apisix.balancer.least_conn") + package.loaded.n_picker = 0 + local old_f = picker.new + picker.new = function (nodes, upstream) + package.loaded.n_picker = package.loaded.n_picker + 1 + return old_f(nodes, upstream) + end +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + local counter = package.loaded.counter + local n_picker = package.loaded.n_picker + if counter == n_picker then + ngx.say("OK") + else + ngx.say(counter, " ", n_picker) + end + } + } +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + - http://x.com:2379 +--- response_body +foo +OK +--- error_log +x.com is resolved to: 127.0.0.3 +x.com is resolved to: 127.0.0.2 + + + +=== TEST 5: retry +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:1979 + - http://[::1]:1979 + - http://localhost:2379 +--- error_log +connect() failed +--- response_body +foo + + + +=== TEST 6: check default SNI +--- http_config +server { + listen 12345 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + + ssl_certificate_by_lua_block { + local ngx_ssl = require "ngx.ssl" + ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) + } + + location / { + proxy_pass http://127.0.0.1:2379; + } +} +--- config + location /t { + content_by_lua_block 
{ + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- response_body +foo +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - https://127.0.0.1:12379 + - https://localhost:12345 + tls: + verify: false +--- error_log +Receive SNI: localhost + + + +=== TEST 7: check configured SNI +--- http_config +server { + listen 12345 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + + ssl_certificate_by_lua_block { + local ngx_ssl = require "ngx.ssl" + ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) + } + + location / { + proxy_pass http://127.0.0.1:2379; + } +} +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- response_body +foo +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - https://127.0.0.1:12379 + - https://127.0.0.1:12345 + tls: + verify: false + sni: "x.com" +--- error_log +Receive SNI: x.com + + + +=== TEST 8: check Host header +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) + } + proxy_pass http://127.0.0.1:2379; + } +} +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- response_body +foo +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:12345 + - http://localhost:12345 +--- error_log 
+Receive Host: localhost +Receive Host: 127.0.0.1 + + + +=== TEST 9: check Host header after retry +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) + } + proxy_pass http://127.0.0.1:2379; + } +} +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/apisix/test", "foo")) + local res = assert(etcd.get("/apisix/test")) + ngx.say(res.body.node.value) + } + } +--- response_body +foo +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:1979 + - http://localhost:12345 +--- error_log +Receive Host: localhost + + + +=== TEST 10: default timeout +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + local etcd_cli = require("resty.etcd") + local f = etcd_cli.new + local timeout + etcd_cli.new = function(conf) + timeout = conf.timeout + return f(conf) + end + etcd.new() + ngx.say(timeout) + } + } +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +--- response_body +30 diff --git a/t/deployment/conf_server2.t b/t/deployment/conf_server2.t new file mode 100644 index 000000000000..b8261c80c31b --- /dev/null +++ b/t/deployment/conf_server2.t @@ -0,0 +1,165 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: health check, ensure unhealthy endpoint is skipped +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + - http://localhost:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 7 do + assert(etcd.set("/apisix/test", "foo")) + end + package.loaded.start_to_fail = nil + ngx.say('OK') + } + } +--- response_body +OK +--- error_log +report failure, endpoint: localhost:12345 +endpoint localhost:12345 is unhealthy, skipped + + + +=== TEST 2: health check, all endpoints are unhealthy +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://localhost:12345 + - 
http://127.0.0.1:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 6 do + etcd.set("/apisix/test", "foo") + end + package.loaded.start_to_fail = nil + local _, err = etcd.set("/apisix/test", "foo") + ngx.say(err) + } + } +--- response_body +invalid response code: 503 +--- error_log +endpoint localhost:12345 is unhealthy, skipped +endpoint 127.0.0.1:12345 is unhealthy, skipped + + + +=== TEST 3: health check, all endpoints recover from unhealthy +--- http_config +server { + listen 12345; + location / { + access_by_lua_block { + if package.loaded.start_to_fail then + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:2379; + } +} +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + health_check_timeout: 1 + prefix: "/apisix" + host: + - http://localhost:12345 + - http://127.0.0.1:12345 +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + package.loaded.start_to_fail = true + for i = 1, 6 do + etcd.set("/apisix/test", "foo") + end + package.loaded.start_to_fail = nil + ngx.sleep(1.2) + local res, err = etcd.set("/apisix/test", "foo") + ngx.say(err or res.body.node.value) + } + } +--- response_body +foo +--- error_log +endpoint localhost:12345 is unhealthy, skipped +endpoint 127.0.0.1:12345 is unhealthy, skipped diff --git a/t/deployment/mtls.t b/t/deployment/mtls.t new file mode 100644 index 000000000000..8826dd2dd030 --- /dev/null +++ b/t/deployment/mtls.t @@ -0,0 +1,119 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: mTLS for control plane +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:12345/version +--- response_body eval +qr/"etcdserver":/ +--- extra_yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt + + + +=== TEST 2: no client certificate +--- exec +curl -k https://localhost:12345/version +--- response_body eval +qr/No required SSL certificate was sent/ +--- extra_yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: 
+ prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt + + + +=== TEST 3: wrong client certificate +--- exec +curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version +--- response_body eval +qr/The SSL certificate error/ +--- extra_yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + conf_server: + listen: 0.0.0.0:12345 + cert: t/certs/mtls_server.crt + cert_key: t/certs/mtls_server.key + client_ca_cert: t/certs/mtls_ca.crt + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 + certs: + cert: t/certs/mtls_client.crt + cert_key: t/certs/mtls_client.key + trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/error_page/error_page.t b/t/error_page/error_page.t index 70cc34c68af9..d6ec79a0093a 100644 --- a/t/error_page/error_page.t +++ b/t/error_page/error_page.t @@ -136,12 +136,8 @@ X-Test-Status: 500 content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -198,3 +194,58 @@ qr/(stash|fetch) ngx ctx/ --- grep_error_log_out stash ngx ctx fetch ngx ctx + + + +=== TEST 11: check if the phases after proxy are run when 500 happens before proxy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function() if ngx.var.http_x_test_status ~= nil then;ngx.exit(tonumber(ngx.var.http_x_test_status));end;end"] + }, + "serverless-pre-function": { + "phase": "log", + "functions" : ["return function() ngx.log(ngx.WARN, 'run log phase in error_page') end"] + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: hit +--- request +GET /hello +--- more_headers +X-Test-Status: 500 +--- error_code: 500 +--- response_body_like +.*apisix.apache.org.* +--- error_log +run log phase in error_page diff --git a/t/fuzzing/client_abort.py b/t/fuzzing/client_abort.py index 707297eaab64..3a75442a32a5 100755 --- a/t/fuzzing/client_abort.py +++ b/t/fuzzing/client_abort.py @@ -24,7 +24,7 @@ from public import check_leak, run_test def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/client_abort", "upstream": { diff --git a/t/fuzzing/http_upstream.py b/t/fuzzing/http_upstream.py new file mode 100755 index 000000000000..877f298cdd5e --- /dev/null +++ b/t/fuzzing/http_upstream.py @@ -0,0 +1,89 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# This file provides a fuzzing test with different upstreams +import http.client +import json +import random +import threading +from public import check_leak, run_test, connect_admin + + +REQ_PER_THREAD = 50 +THREADS_NUM = 4 +TOTOL_ROUTES = 10 + + +def create_route(): + for i in range(TOTOL_ROUTES): + conn = connect_admin() + scheme = "http" if i % 2 == 0 else "https" + port = ":6666" if i % 2 == 0 else ":6667" + suffix = str(i + 1) + i = str(i) + conf = json.dumps({ + "uri": "/*", + "host": "test" + i + ".com", + "plugins": { + }, + "upstream": { + "scheme": scheme, + "nodes": { + "127.0.0." + suffix + port: 1 + }, + "type": "roundrobin" + }, + }) + + conn.request("PUT", "/apisix/admin/routes/" + i, conf, + headers={ + "X-API-KEY":"edd1c9f034335f136f87ad84b625c8f1", + }) + response = conn.getresponse() + assert response.status <= 300, response.read() + +def req(): + route_id = random.randrange(TOTOL_ROUTES) + conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn.request("GET", "/server_addr", + headers={ + "Host":"test" + str(route_id) + ".com", + }) + response = conn.getresponse() + assert response.status == 200, response.read() + ip = response.read().rstrip().decode() + suffix = str(route_id + 1) + assert "127.0.0." 
+ suffix == ip, f"expect: 127.0.0.{suffix}, actual: {ip}" + +def run_in_thread(): + for i in range(REQ_PER_THREAD): + req() + +@check_leak +def run(): + th = [threading.Thread(target=run_in_thread) for i in range(THREADS_NUM)] + for t in th: + t.start() + for t in th: + t.join() + + +if __name__ == "__main__": + run_test(create_route, run) + diff --git a/t/fuzzing/public.py b/t/fuzzing/public.py index 500b3d39b222..0897ec476bbe 100644 --- a/t/fuzzing/public.py +++ b/t/fuzzing/public.py @@ -30,6 +30,10 @@ def apisix_pwd(): return os.environ.get("APISIX_FUZZING_PWD") or \ (str(Path.home()) + "/work/apisix/apisix") +def connect_admin(): + conn = http.client.HTTPConnection("127.0.0.1", port=9180) + return conn + def check_log(): boofuzz_log = cur_dir() + "/test.log" apisix_errorlog = apisix_pwd() + "/logs/error.log" diff --git a/t/fuzzing/serverless_route_test.py b/t/fuzzing/serverless_route_test.py index e84085f574e0..564914734877 100644 --- a/t/fuzzing/serverless_route_test.py +++ b/t/fuzzing/serverless_route_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_size, s_static, s_string def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/post*", "methods": ["POST"], diff --git a/t/fuzzing/simple_http.py b/t/fuzzing/simple_http.py index f2d2099f7ce5..b3db2027e3f2 100755 --- a/t/fuzzing/simple_http.py +++ b/t/fuzzing/simple_http.py @@ -22,7 +22,7 @@ import json import random import threading -from public import check_leak, LEAK_COUNT, run_test +from public import check_leak, LEAK_COUNT, run_test, connect_admin REQ_PER_THREAD = 50 @@ -40,7 +40,7 @@ def create_route(): } } }) - conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn = connect_admin() conn.request("PUT", 
"/apisix/admin/consumers", conf, headers={ "X-API-KEY":"edd1c9f034335f136f87ad84b625c8f1", @@ -49,7 +49,7 @@ def create_route(): assert response.status <= 300, response.read() for i in range(TOTOL_ROUTES): - conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn = connect_admin() i = str(i) conf = json.dumps({ "uri": "/*", diff --git a/t/fuzzing/simpleroute_test.py b/t/fuzzing/simpleroute_test.py index 8a7e43119810..9ea56ce693b1 100755 --- a/t/fuzzing/simpleroute_test.py +++ b/t/fuzzing/simpleroute_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string def create_route(): - command = '''curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/get*", "methods": ["GET"], diff --git a/t/fuzzing/upstream/nginx.conf b/t/fuzzing/upstream/nginx.conf index 3c6405370028..7a94517ce069 100644 --- a/t/fuzzing/upstream/nginx.conf +++ b/t/fuzzing/upstream/nginx.conf @@ -53,6 +53,23 @@ http { ngx.sleep(tonumber(ngx.var.arg_seconds or 1)) } } + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } } + server { + listen 6667 ssl; + ssl_certificate ../../certs/apisix.crt; + ssl_certificate_key ../../certs/apisix.key; + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } + } } diff --git a/t/fuzzing/vars_route_test.py b/t/fuzzing/vars_route_test.py index c2559fe3dc39..dc8325484c3c 100644 --- a/t/fuzzing/vars_route_test.py +++ b/t/fuzzing/vars_route_test.py @@ -22,7 +22,7 @@ from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string def create_route(): - command = '''curl -i http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + command = '''curl -i 
http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "uri": "/parameter*", "vars": [ diff --git a/t/grpc_server_example/main.go b/t/grpc_server_example/main.go index 18bda0536d00..1b533582c464 100644 --- a/t/grpc_server_example/main.go +++ b/t/grpc_server_example/main.go @@ -172,6 +172,31 @@ func (s *server) SayHelloBidirectionalStream(stream pb.Greeter_SayHelloBidirecti } } +// SayMultipleHello implements helloworld.GreeterServer +func (s *server) SayMultipleHello(ctx context.Context, in *pb.MultipleHelloRequest) (*pb.MultipleHelloReply, error) { + log.Printf("Received: %v", in.Name) + log.Printf("Enum Gender: %v", in.GetGenders()) + msg := "Hello " + in.Name + + persons := in.GetPersons() + if persons != nil { + for _, person := range persons { + if person.GetName() != "" { + msg += fmt.Sprintf(", name: %v", person.GetName()) + } + if person.GetAge() != 0 { + msg += fmt.Sprintf(", age: %v", person.GetAge()) + } + } + } + + return &pb.MultipleHelloReply{ + Message: msg, + Items: in.GetItems(), + Genders: in.GetGenders(), + }, nil +} + func (s *server) Run(ctx context.Context, in *pb.Request) (*pb.Response, error) { return &pb.Response{Body: in.User.Name + " " + in.Body}, nil } diff --git a/t/grpc_server_example/proto/helloworld.pb.go b/t/grpc_server_example/proto/helloworld.pb.go index 9cb209566825..71b16a3455c6 100644 --- a/t/grpc_server_example/proto/helloworld.pb.go +++ b/t/grpc_server_example/proto/helloworld.pb.go @@ -1,8 +1,10 @@ -// Copyright 2015 gRPC authors. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. 
+// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // @@ -11,11 +13,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.6.1 +// protoc-gen-go v1.25.0-devel +// protoc v3.12.4 // source: proto/helloworld.proto package proto @@ -374,6 +377,140 @@ func (x *PlusReply) GetResult() int64 { return 0 } +type MultipleHelloRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"` + Persons []*Person `protobuf:"bytes,4,rep,name=persons,proto3" json:"persons,omitempty"` +} + +func (x *MultipleHelloRequest) Reset() { + *x = MultipleHelloRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultipleHelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultipleHelloRequest) ProtoMessage() {} + +func (x *MultipleHelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
MultipleHelloRequest.ProtoReflect.Descriptor instead. +func (*MultipleHelloRequest) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{5} +} + +func (x *MultipleHelloRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MultipleHelloRequest) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *MultipleHelloRequest) GetGenders() []Gender { + if x != nil { + return x.Genders + } + return nil +} + +func (x *MultipleHelloRequest) GetPersons() []*Person { + if x != nil { + return x.Persons + } + return nil +} + +type MultipleHelloReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"` +} + +func (x *MultipleHelloReply) Reset() { + *x = MultipleHelloReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultipleHelloReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultipleHelloReply) ProtoMessage() {} + +func (x *MultipleHelloReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultipleHelloReply.ProtoReflect.Descriptor instead. 
+func (*MultipleHelloReply) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{6} +} + +func (x *MultipleHelloReply) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *MultipleHelloReply) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *MultipleHelloReply) GetGenders() []Gender { + if x != nil { + return x.Genders + } + return nil +} + var File_proto_helloworld_proto protoreflect.FileDescriptor var file_proto_helloworld_proto_rawDesc = []byte{ @@ -403,40 +540,63 @@ var file_proto_helloworld_proto_rawDesc = []byte{ 0x0a, 0x01, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x62, 0x22, 0x23, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x2a, 0x40, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x0e, 0x47, - 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0f, 0x0a, 0x0b, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x01, - 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x46, 0x45, 0x4d, 0x41, 0x4c, - 0x45, 0x10, 0x02, 0x32, 0xc0, 0x03, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x18, 0x2e, 0x68, 0x65, - 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, - 0x38, 0x0a, 0x04, 0x50, 0x6c, 0x75, 0x73, 0x12, 0x17, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, - 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
- 0x1a, 0x15, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, - 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, 0x53, 0x61, 0x79, - 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x41, 0x66, 0x74, 0x65, 0x72, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, - 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, - 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, - 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, - 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, - 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x30, - 0x01, 0x12, 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, - 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, - 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x12, - 0x55, 0x0a, 0x1b, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x42, 0x69, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, + 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x14, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, + 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, + 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x52, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73, + 0x22, 0x72, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, + 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x73, 0x2a, 0x40, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x0e, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x46, 0x45, + 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x32, 0x98, 0x04, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, 0x74, + 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 
0x6c, 0x6f, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x22, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x50, 0x6c, 0x75, 0x73, 0x12, 0x17, 0x2e, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, + 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x12, + 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x41, 0x66, 0x74, 0x65, 0x72, 0x44, 0x65, 0x6c, + 0x61, 0x79, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, + 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x10, 0x53, 0x61, 0x79, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x6c, + 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x68, + 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x4c, + 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 
0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, + 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, + 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x14, + 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, + 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x12, 0x55, 0x0a, 0x1b, 0x53, 0x61, + 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, + 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, + 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x30, + 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -452,36 +612,43 @@ func file_proto_helloworld_proto_rawDescGZIP() []byte { } var file_proto_helloworld_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_proto_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_proto_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_proto_helloworld_proto_goTypes = []interface{}{ - (Gender)(0), // 0: helloworld.Gender - (*Person)(nil), // 1: helloworld.Person - (*HelloRequest)(nil), // 2: helloworld.HelloRequest - (*HelloReply)(nil), // 3: helloworld.HelloReply - 
(*PlusRequest)(nil), // 4: helloworld.PlusRequest - (*PlusReply)(nil), // 5: helloworld.PlusReply + (Gender)(0), // 0: helloworld.Gender + (*Person)(nil), // 1: helloworld.Person + (*HelloRequest)(nil), // 2: helloworld.HelloRequest + (*HelloReply)(nil), // 3: helloworld.HelloReply + (*PlusRequest)(nil), // 4: helloworld.PlusRequest + (*PlusReply)(nil), // 5: helloworld.PlusReply + (*MultipleHelloRequest)(nil), // 6: helloworld.MultipleHelloRequest + (*MultipleHelloReply)(nil), // 7: helloworld.MultipleHelloReply } var file_proto_helloworld_proto_depIdxs = []int32{ - 0, // 0: helloworld.HelloRequest.gender:type_name -> helloworld.Gender - 1, // 1: helloworld.HelloRequest.person:type_name -> helloworld.Person - 0, // 2: helloworld.HelloReply.gender:type_name -> helloworld.Gender - 2, // 3: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest - 4, // 4: helloworld.Greeter.Plus:input_type -> helloworld.PlusRequest - 2, // 5: helloworld.Greeter.SayHelloAfterDelay:input_type -> helloworld.HelloRequest - 2, // 6: helloworld.Greeter.SayHelloServerStream:input_type -> helloworld.HelloRequest - 2, // 7: helloworld.Greeter.SayHelloClientStream:input_type -> helloworld.HelloRequest - 2, // 8: helloworld.Greeter.SayHelloBidirectionalStream:input_type -> helloworld.HelloRequest - 3, // 9: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply - 5, // 10: helloworld.Greeter.Plus:output_type -> helloworld.PlusReply - 3, // 11: helloworld.Greeter.SayHelloAfterDelay:output_type -> helloworld.HelloReply - 3, // 12: helloworld.Greeter.SayHelloServerStream:output_type -> helloworld.HelloReply - 3, // 13: helloworld.Greeter.SayHelloClientStream:output_type -> helloworld.HelloReply - 3, // 14: helloworld.Greeter.SayHelloBidirectionalStream:output_type -> helloworld.HelloReply - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list 
for extension extendee - 0, // [0:3] is the sub-list for field type_name + 0, // 0: helloworld.HelloRequest.gender:type_name -> helloworld.Gender + 1, // 1: helloworld.HelloRequest.person:type_name -> helloworld.Person + 0, // 2: helloworld.HelloReply.gender:type_name -> helloworld.Gender + 0, // 3: helloworld.MultipleHelloRequest.genders:type_name -> helloworld.Gender + 1, // 4: helloworld.MultipleHelloRequest.persons:type_name -> helloworld.Person + 0, // 5: helloworld.MultipleHelloReply.genders:type_name -> helloworld.Gender + 2, // 6: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest + 4, // 7: helloworld.Greeter.Plus:input_type -> helloworld.PlusRequest + 2, // 8: helloworld.Greeter.SayHelloAfterDelay:input_type -> helloworld.HelloRequest + 6, // 9: helloworld.Greeter.SayMultipleHello:input_type -> helloworld.MultipleHelloRequest + 2, // 10: helloworld.Greeter.SayHelloServerStream:input_type -> helloworld.HelloRequest + 2, // 11: helloworld.Greeter.SayHelloClientStream:input_type -> helloworld.HelloRequest + 2, // 12: helloworld.Greeter.SayHelloBidirectionalStream:input_type -> helloworld.HelloRequest + 3, // 13: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply + 5, // 14: helloworld.Greeter.Plus:output_type -> helloworld.PlusReply + 3, // 15: helloworld.Greeter.SayHelloAfterDelay:output_type -> helloworld.HelloReply + 7, // 16: helloworld.Greeter.SayMultipleHello:output_type -> helloworld.MultipleHelloReply + 3, // 17: helloworld.Greeter.SayHelloServerStream:output_type -> helloworld.HelloReply + 3, // 18: helloworld.Greeter.SayHelloClientStream:output_type -> helloworld.HelloReply + 3, // 19: helloworld.Greeter.SayHelloBidirectionalStream:output_type -> helloworld.HelloReply + 13, // [13:20] is the sub-list for method output_type + 6, // [6:13] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field 
type_name } func init() { file_proto_helloworld_proto_init() } @@ -550,6 +717,30 @@ func file_proto_helloworld_proto_init() { return nil } } + file_proto_helloworld_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultipleHelloRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultipleHelloReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -557,7 +748,7 @@ func file_proto_helloworld_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_helloworld_proto_rawDesc, NumEnums: 1, - NumMessages: 5, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git a/t/grpc_server_example/proto/helloworld.proto b/t/grpc_server_example/proto/helloworld.proto index 2e18a467c822..db056fadec25 100644 --- a/t/grpc_server_example/proto/helloworld.proto +++ b/t/grpc_server_example/proto/helloworld.proto @@ -25,6 +25,7 @@ service Greeter { rpc SayHello (HelloRequest) returns (HelloReply) {} rpc Plus (PlusRequest) returns (PlusReply) {} rpc SayHelloAfterDelay (HelloRequest) returns (HelloReply) {} + rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {} // Server side streaming. rpc SayHelloServerStream (HelloRequest) returns (stream HelloReply) {} @@ -34,6 +35,7 @@ service Greeter { // Bidirectional streaming. 
rpc SayHelloBidirectionalStream (stream HelloRequest) returns (stream HelloReply) {} + } enum Gender { @@ -68,3 +70,16 @@ message PlusRequest { message PlusReply { int64 result = 1; } + +message MultipleHelloRequest { + string name = 1; + repeated string items = 2; + repeated Gender genders = 3; + repeated Person persons = 4; +} + +message MultipleHelloReply{ + string message = 1; + repeated string items = 2; + repeated Gender genders = 3; +} diff --git a/t/grpc_server_example/proto/helloworld_grpc.pb.go b/t/grpc_server_example/proto/helloworld_grpc.pb.go index 7d6d8ef8b7df..c0527d7542f8 100644 --- a/t/grpc_server_example/proto/helloworld_grpc.pb.go +++ b/t/grpc_server_example/proto/helloworld_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: proto/helloworld.proto package proto @@ -22,6 +26,7 @@ type GreeterClient interface { SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) Plus(ctx context.Context, in *PlusRequest, opts ...grpc.CallOption) (*PlusReply, error) SayHelloAfterDelay(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) + SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error) // Server side streaming. SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error) // Client side streaming. @@ -65,6 +70,15 @@ func (c *greeterClient) SayHelloAfterDelay(ctx context.Context, in *HelloRequest return out, nil } +func (c *greeterClient) SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error) { + out := new(MultipleHelloReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayMultipleHello", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *greeterClient) SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error) { stream, err := c.cc.NewStream(ctx, &Greeter_ServiceDesc.Streams[0], "/helloworld.Greeter/SayHelloServerStream", opts...) if err != nil { @@ -170,6 +184,7 @@ type GreeterServer interface { SayHello(context.Context, *HelloRequest) (*HelloReply, error) Plus(context.Context, *PlusRequest) (*PlusReply, error) SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error) + SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error) // Server side streaming. SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error // Client side streaming. @@ -192,6 +207,9 @@ func (UnimplementedGreeterServer) Plus(context.Context, *PlusRequest) (*PlusRepl func (UnimplementedGreeterServer) SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error) { return nil, status.Errorf(codes.Unimplemented, "method SayHelloAfterDelay not implemented") } +func (UnimplementedGreeterServer) SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SayMultipleHello not implemented") +} func (UnimplementedGreeterServer) SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error { return status.Errorf(codes.Unimplemented, "method SayHelloServerStream not implemented") } @@ -268,6 +286,24 @@ func _Greeter_SayHelloAfterDelay_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _Greeter_SayMultipleHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultipleHelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(GreeterServer).SayMultipleHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayMultipleHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayMultipleHello(ctx, req.(*MultipleHelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Greeter_SayHelloServerStream_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HelloRequest) if err := stream.RecvMsg(m); err != nil { @@ -360,6 +396,10 @@ var Greeter_ServiceDesc = grpc.ServiceDesc{ MethodName: "SayHelloAfterDelay", Handler: _Greeter_SayHelloAfterDelay_Handler, }, + { + MethodName: "SayMultipleHello", + Handler: _Greeter_SayMultipleHello_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/t/grpc_server_example/proto/import.pb.go b/t/grpc_server_example/proto/import.pb.go index 28fabf3f3726..a5575fdbd396 100644 --- a/t/grpc_server_example/proto/import.pb.go +++ b/t/grpc_server_example/proto/import.pb.go @@ -1,7 +1,24 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.6.1 +// protoc-gen-go v1.25.0-devel +// protoc v3.12.4 // source: proto/import.proto package proto diff --git a/t/grpc_server_example/proto/src.pb.go b/t/grpc_server_example/proto/src.pb.go index 8e6a32ae379b..74fa884d122e 100644 --- a/t/grpc_server_example/proto/src.pb.go +++ b/t/grpc_server_example/proto/src.pb.go @@ -1,7 +1,24 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.6.1 +// protoc-gen-go v1.25.0-devel +// protoc v3.12.4 // source: proto/src.proto package proto diff --git a/t/grpc_server_example/proto/src_grpc.pb.go b/t/grpc_server_example/proto/src_grpc.pb.go index 01fe1502d489..d4015ed99142 100644 --- a/t/grpc_server_example/proto/src_grpc.pb.go +++ b/t/grpc_server_example/proto/src_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: proto/src.proto package proto diff --git a/t/lib/server.lua b/t/lib/server.lua index 029f463e534f..a8ec77af6960 100644 --- a/t/lib/server.lua +++ b/t/lib/server.lua @@ -298,9 +298,15 @@ function _M.wolf_rbac_access_check() ngx.say(json_encode({ok=true, data={ userInfo={nickname="administrator", username="admin", id="100"} }})) - else + elseif resName == '/hello/500' then + ngx.status = 500 + ngx.say(json_encode({ok=false, reason="ERR_SERVER_ERROR"})) + elseif resName == '/hello/401' then ngx.status = 401 - ngx.say(json_encode({ok=false, reason="no permission to access"})) + ngx.say(json_encode({ok=false, reason="ERR_TOKEN_INVALID"})) + else + ngx.status = 403 + ngx.say(json_encode({ok=false, reason="ERR_ACCESS_DENIED"})) end end @@ -377,6 +383,10 @@ for i = 1, 100 do _M["print_uri_" .. i] = print_uri end +function _M.print_uri_detailed() + ngx.say("ngx.var.uri: ", ngx.var.uri) + ngx.say("ngx.var.request_uri: ", ngx.var.request_uri) +end function _M.headers() local args = ngx.req.get_uri_args() diff --git a/t/node/chash-hashon.t b/t/node/chash-hashon.t index 0b9c161d9031..3cd24559c1e5 100644 --- a/t/node/chash-hashon.t +++ b/t/node/chash-hashon.t @@ -51,17 +51,14 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "username": "jack", - "plugins": { - "key-auth": { - "key": "auth-jack" - } + "value": { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" } } - }, - "action": "set" + } }]] ) @@ -82,17 +79,14 @@ __DATA__ } }]], [[{ - "node": { - "value": { - "username": "tom", - "plugins": { - "key-auth": { - "key": "auth-tom" - } + "value": { + "username": "tom", + "plugins": { + "key-auth": { + "key": "auth-tom" } } - }, - "action": "set" + } }]] ) ngx.say(code .. 
" " ..body) diff --git a/t/node/client-mtls-openresty-1-19.t b/t/node/client-mtls-openresty.t similarity index 94% rename from t/node/client-mtls-openresty-1-19.t rename to t/node/client-mtls-openresty.t index a7bf517c17c1..1779abe09126 100644 --- a/t/node/client-mtls-openresty-1-19.t +++ b/t/node/client-mtls-openresty.t @@ -20,11 +20,7 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; my $version = eval { `$nginx_binary -V 2>&1` }; if ($version !~ m/\/apisix-nginx-module/) { - if ($version =~ m/\/1.17.8/) { - plan(skip_all => "require OpenResty 1.19+"); - } else { - plan('no_plan'); - } + plan('no_plan'); } else { plan(skip_all => "for vanilla OpenResty only"); } @@ -78,7 +74,7 @@ __DATA__ depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/node/client-mtls.t b/t/node/client-mtls.t index afccc93752b9..aa326dbe98b6 100644 --- a/t/node/client-mtls.t +++ b/t/node/client-mtls.t @@ -58,7 +58,7 @@ __DATA__ ca = ("test.com"):rep(128), } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -92,7 +92,7 @@ GET /t client = { } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -173,7 +173,7 @@ GET /t depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/node/consumer-plugin2.t b/t/node/consumer-plugin2.t index 249441a6c1a4..c05762f40e2d 100644 --- a/t/node/consumer-plugin2.t +++ b/t/node/consumer-plugin2.t @@ -238,3 +238,68 @@ x-real-ip: 127.0.0.1 } --- response_body {"key-auth":true,"proxy-rewrite":true} + + + +=== TEST 7: configure non-auth plugins in the consumer and run it's rewrite phase +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit routes and ip-restriction work well +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} diff --git a/t/node/grpc-proxy-unary.t b/t/node/grpc-proxy-unary.t index 393016d1578f..f1a063c54a15 100644 --- a/t/node/grpc-proxy-unary.t +++ b/t/node/grpc-proxy-unary.t @@ -70,8 +70,8 @@ routes: methods: [ POST ] - service_protocol: grpc upstream: + scheme: grpc nodes: "127.0.0.1:50051": 1 type: roundrobin @@ -114,7 +114,7 @@ grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plai === TEST 4: Unary API gRPC proxy with tls --- http2 --- apisix_yaml -ssl: +ssls: - id: 1 cert: "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n" diff --git a/t/node/grpc-proxy.t b/t/node/grpc-proxy.t index c4338e77bfe3..5c14e01d0007 100644 --- a/t/node/grpc-proxy.t +++ b/t/node/grpc-proxy.t @@ -102,13 +102,13 @@ apikey: user-key upstreams: - id: 1 type: roundrobin + scheme: grpc nodes: "127.0.0.1:9088": 1 routes: - id: 1 methods: - POST - 
service_protocol: grpc uri: "/hello" upstream_id: 1 #END @@ -130,7 +130,6 @@ routes: - id: 1 methods: - POST - service_protocol: grpc uri: "/hello" plugins: key-auth: @@ -139,6 +138,7 @@ routes: - jack upstream: type: roundrobin + scheme: grpc nodes: "127.0.0.1:9088": 1 #END diff --git a/t/node/healthcheck-passive.t b/t/node/healthcheck-passive.t index be85ecc9df35..f3f694b9f38f 100644 --- a/t/node/healthcheck-passive.t +++ b/t/node/healthcheck-passive.t @@ -165,3 +165,173 @@ GET /t --- error_code: 400 --- response_body {"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: [\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 4: set route(only active + active & passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello_", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. 
[[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: only one route should have passive healthcheck +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + -- only /hello_ has passive healthcheck + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/enabled healthcheck passive/ +--- grep_error_log_out +enabled healthcheck passive + + + +=== TEST 6: make sure passive healthcheck works (conf is not corrupted by the default value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + local res, err = httpc:request_uri(uri .. 
"/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/\[healthcheck\] \([^)]+\) unhealthy HTTP increment/ +--- grep_error_log_out +[healthcheck] (upstream#/apisix/routes/2) unhealthy HTTP increment diff --git a/t/node/https-proxy.t b/t/node/https-proxy.t index 236fdaf76316..e7ff50185d1c 100644 --- a/t/node/https-proxy.t +++ b/t/node/https-proxy.t @@ -35,7 +35,7 @@ run_tests; __DATA__ -=== TEST 1: add route to HTTPS upstream (old way) +=== TEST 1: add route to HTTPS upstream --- config location /t { content_by_lua_block { @@ -44,101 +44,6 @@ __DATA__ ngx.HTTP_PUT, [[{ "methods": ["GET"], - "plugins": { - "proxy-rewrite": { - "scheme": "https" - } - }, - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1983": 1 - } - }, - "uri": "/hello" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 2: hit the upstream (old way) ---- request -GET /hello ---- more_headers -host: www.sni.com ---- error_log -Receive SNI: www.sni.com - - - -=== TEST 3: add route to HTTPS upstream ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "methods": ["GET"], - "upstream": { - "scheme": "https", - "type": "roundrobin", - "nodes": { - "127.0.0.1:1983": 1 - } - }, - "uri": "/hello" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 4: hit the upstream ---- request -GET /hello ---- more_headers -host: www.sni.com ---- error_log -Receive SNI: www.sni.com - - - -=== TEST 5: add route to HTTPS upstream (mix) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, 
- [[{ - "methods": ["GET"], - "plugins": { - "proxy-rewrite": { - "scheme": "https" - } - }, "upstream": { "scheme": "https", "type": "roundrobin", @@ -163,7 +68,7 @@ passed -=== TEST 6: hit the upstream +=== TEST 2: hit the upstream --- request GET /hello --- more_headers @@ -173,7 +78,7 @@ Receive SNI: www.sni.com -=== TEST 7: use 443 as the default port +=== TEST 3: use 443 as the default port --- apisix_yaml routes: - @@ -192,7 +97,7 @@ upstream: "https://127.0.0.1:443/hello" -=== TEST 8: use 80 as the http's default port +=== TEST 4: use 80 as the http's default port --- apisix_yaml routes: - @@ -210,7 +115,7 @@ upstream: "http://127.0.0.1:80/hello" -=== TEST 9: rewrite SNI +=== TEST 5: rewrite SNI --- log_level: debug --- apisix_yaml routes: @@ -237,7 +142,7 @@ x-real-ip: 127.0.0.1 -=== TEST 10: node's SNI +=== TEST 6: node's SNI --- log_level: debug --- apisix_yaml routes: diff --git a/t/node/merge-route.t b/t/node/merge-route.t index b6f1d467e8d5..2d1f48f4a1ae 100644 --- a/t/node/merge-route.t +++ b/t/node/merge-route.t @@ -180,7 +180,9 @@ qr/1980/ "time_window": 60, "rejected_code": 503, "key": "remote_addr", - "disable": true + "_meta": { + "disable": true + } } }, "uri": "/server_port", @@ -249,6 +251,7 @@ qr/merge_service_route.*"time_window":60/] ngx.HTTP_PUT, [[{ "upstream": { + "scheme": "https", "type": "roundrobin", "nodes": { "httpbin.org:443": 1 @@ -280,11 +283,11 @@ passed local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ - "uri": "/get", + "uri": "/fake", "host": "httpbin.org", "plugins": { "proxy-rewrite": { - "scheme": "https" + "uri": "/get" } }, "service_id": "1" @@ -308,7 +311,7 @@ passed === TEST 12: hit route --- request -GET /get +GET /fake --- more_headers host: httpbin.org --- response_body eval @@ -321,7 +324,7 @@ qr/"Host": "httpbin.org"/ === TEST 13: not hit route --- request -GET /get +GET /fake --- more_headers host: httpbin.orgxxx --- error_code: 404 diff --git a/t/node/plugin-configs.t b/t/node/plugin-configs.t 
index 770392276a77..d6c0cb75c762 100644 --- a/t/node/plugin-configs.t +++ b/t/node/plugin-configs.t @@ -249,3 +249,64 @@ property "block_rules" validation failed --- response_body hello hello world + + + +=== TEST 5: don't override the plugin in the route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + }, + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/helloaa", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1, + "plugins": { + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/helloaa') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.say(org_body) + } + } +--- response_body +world diff --git a/t/node/plugin.t b/t/node/plugin.t new file mode 100644 index 000000000000..f2c54c72bd56 --- /dev/null +++ b/t/node/plugin.t @@ -0,0 +1,48 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: set custom log format +--- extra_init_by_lua + local exp = require("apisix.plugins.example-plugin") + exp.destroy = function() + ngx.log(ngx.WARN, "destroy method called") + end +--- config + location /t { + return 200 "dummy"; + } +--- shutdown_error_log +destroy method called diff --git a/t/node/upstream-domain.t b/t/node/upstream-domain.t index 2cf71dd87696..12b7f53aa2d1 100644 --- a/t/node/upstream-domain.t +++ b/t/node/upstream-domain.t @@ -174,9 +174,7 @@ failed to parse domain: httpbin.orgx local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] + ngx.HTTP_DELETE ) if code >= 300 then @@ -202,9 +200,7 @@ passed local t = require("lib.test_admin").test local code, body = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]] + ngx.HTTP_DELETE ) if code >= 300 then diff --git a/t/node/upstream-ipv6.t b/t/node/upstream-ipv6.t index 51d2e8b84610..8aa39f6cf080 100644 --- a/t/node/upstream-ipv6.t +++ b/t/node/upstream-ipv6.t @@ -108,3 +108,187 @@ GET /hello hello world --- no_error_log [error] + + + +=== TEST 5: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } 
+ } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 6: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 7: set upstream, one array item to specify node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "[::1]", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 8: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] + + + +=== TEST 9: set upstream, one hash key to specify node, in wrong format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "::1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: hit routes +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() to [::0.1.25.128]:80 failed + + + +=== TEST 11: set upstream, two array items to specify nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + }, + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + 
} +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 12: hit routes +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] diff --git a/t/node/upstream-keepalive-pool.t b/t/node/upstream-keepalive-pool.t index 084522b4e07a..26f9306d079c 100644 --- a/t/node/upstream-keepalive-pool.t +++ b/t/node/upstream-keepalive-pool.t @@ -635,3 +635,106 @@ qr/lua balancer: keepalive create pool, .*/ qr/^lua balancer: keepalive create pool, crc32: \S+, size: 8 lua balancer: keepalive create pool, crc32: \S+, size: 4 $/ + + + +=== TEST 14: upstreams with SNI, then without SNI +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "pass_host": "rewrite", + "upstream_host": "a.com", + "keepalive_pool": { + "size": 4 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "http", + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + pass_host = "rewrite", + upstream_host = "b.com", + keepalive_pool = { + size = 8 + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 2 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. 
[[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 15: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + for i = 0, 1 do + local idx = i % 2 + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. idx) + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- grep_error_log eval +qr/lua balancer: keepalive create pool, .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 4 +lua balancer: keepalive create pool, crc32: \S+, size: 8 +$/ diff --git a/t/node/upstream-mtls.t b/t/node/upstream-mtls.t index c909dbc9a64f..eaf3fe2fd15c 100644 --- a/t/node/upstream-mtls.t +++ b/t/node/upstream-mtls.t @@ -167,7 +167,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) -- upstream local data = { @@ -203,7 +203,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.tls.client_key == ssl_key) + ngx.say(res.value.tls.client_key == ssl_key) local data = { upstream = { @@ -240,7 +240,7 @@ decrypt ssl key failed end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) } } --- request @@ -387,7 +387,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) -- upstream local data = { @@ -423,7 +423,7 @@ apisix: end res = json.decode(res) - ngx.say(res.node.value.tls.client_key == ssl_key) + ngx.say(res.value.tls.client_key == ssl_key) local data = { upstream = { @@ -460,7 +460,7 @@ apisix: end res = json.decode(res) - 
ngx.say(res.node.value.upstream.tls.client_key == ssl_key) + ngx.say(res.value.upstream.tls.client_key == ssl_key) } } --- request @@ -562,7 +562,7 @@ hello world cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -629,7 +629,7 @@ hello world cert = ssl_cert, key = ssl_key } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) @@ -664,7 +664,7 @@ failed to get ssl cert: ssl type should be 'client' local t = require("lib.test_admin") local json = require("toolkit.json") - local code, body = t.test('/apisix/admin/ssl/1', ngx.HTTP_DELETE) + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) if code >= 300 then ngx.status = code diff --git a/t/node/upstream-websocket.t b/t/node/upstream-websocket.t index 5ca21d28ee33..a24474749b30 100644 --- a/t/node/upstream-websocket.t +++ b/t/node/upstream-websocket.t @@ -254,7 +254,7 @@ qr/failed to new websocket: bad "upgrade" request header: nil/ local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "127.0.0.1"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/node/upstream.t b/t/node/upstream.t index 70da36145b9b..ec6da1f82f33 100644 --- a/t/node/upstream.t +++ b/t/node/upstream.t @@ -140,11 +140,7 @@ hello world ngx.sleep(0.5) local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.print("[delete] code: ", code, " message: ", message) } @@ -164,11 +160,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/routes/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + 
ngx.HTTP_DELETE ) ngx.say("[delete] code: ", code, " message: ", message) } @@ -188,11 +180,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.say("[delete] code: ", code, " message: ", message) } @@ -212,11 +200,7 @@ GET /t content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/upstreams/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] + ngx.HTTP_DELETE ) ngx.say("[delete] code: ", code) } @@ -436,7 +420,6 @@ GET /t } --- request GET /t ---- skip_nginx: 5: < 1.19.0 --- response_body passed --- no_error_log @@ -447,7 +430,6 @@ passed === TEST 18: hit route --- request GET /uri ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: 127.0.0.1/ --- error_log @@ -494,7 +476,6 @@ proxy request to 127.0.0.1:1980 } --- request GET /t ---- skip_nginx: 5: < 1.19.0 --- response_body passed --- no_error_log @@ -505,7 +486,6 @@ passed === TEST 20: hit route --- request GET /uri ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: localhost/ --- error_log @@ -607,7 +587,6 @@ qr/host: localhost:1980/ ngx.say(body) } } ---- skip_nginx: 5: < 1.19.0 --- request GET /t --- response_body @@ -619,7 +598,6 @@ passed === TEST 24: hit route --- log_level: debug ---- skip_nginx: 5: < 1.19.0 --- request GET /uri --- error_log diff --git a/t/plugin/authz-keycloak.t b/t/plugin/authz-keycloak.t index 7eb287b5a8e9..f3428a3ad363 100644 --- a/t/plugin/authz-keycloak.t +++ b/t/plugin/authz-keycloak.t @@ -74,32 +74,7 @@ done -=== TEST 3: minimal valid configuration with audience ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.authz-keycloak") - local ok, err = plugin.check_schema({ - audience = "foo", - discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration" - }) - if not ok then - ngx.say(err) 
- end - - ngx.say("done") - } - } ---- request -GET /t ---- response_body -done ---- no_error_log -[error] - - - -=== TEST 4: minimal valid configuration w/o discovery when lazy_load_paths=true +=== TEST 3: minimal valid configuration w/o discovery when lazy_load_paths=true --- config location /t { content_by_lua_block { @@ -126,7 +101,7 @@ done -=== TEST 5: minimal valid configuration with discovery when lazy_load_paths=true +=== TEST 4: minimal valid configuration with discovery when lazy_load_paths=true --- config location /t { content_by_lua_block { @@ -152,7 +127,7 @@ done -=== TEST 6: full schema check +=== TEST 5: full schema check --- config location /t { content_by_lua_block { @@ -162,7 +137,6 @@ done token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token", resource_registration_endpoint = "https://host.domain/auth/realms/foo/authz/protection/resource_set", client_id = "University", - audience = "University", client_secret = "secret", grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket", policy_enforcement_mode = "ENFORCING", @@ -197,7 +171,7 @@ done -=== TEST 7: token_endpoint and discovery both missing +=== TEST 6: token_endpoint and discovery both missing --- config location /t { content_by_lua_block { @@ -220,7 +194,7 @@ done -=== TEST 8: client_id and audience both missing +=== TEST 7: client_id missing --- config location /t { content_by_lua_block { @@ -236,14 +210,14 @@ done --- request GET /t --- response_body -allOf 2 failed: object matches none of the required: ["client_id"] or ["audience"] +property "client_id" is required done --- no_error_log [error] -=== TEST 9: resource_registration_endpoint and discovery both missing and lazy_load_paths is true +=== TEST 8: resource_registration_endpoint and discovery both missing and lazy_load_paths is true --- config location /t { content_by_lua_block { @@ -263,14 +237,14 @@ done --- request GET /t --- response_body -allOf 3 failed: object matches none of the required 
+allOf 2 failed: object matches none of the required done --- no_error_log [error] -=== TEST 10: Add https endpoint with ssl_verify true (default) +=== TEST 9: Add https endpoint with ssl_verify true (default) --- config location /t { content_by_lua_block { @@ -312,7 +286,7 @@ passed -=== TEST 11: TEST with fake token and https endpoint +=== TEST 10: TEST with fake token and https endpoint --- config location /t { content_by_lua_block { @@ -345,7 +319,7 @@ Error while sending authz request to https://127.0.0.1:8443/auth/realms/Universi -=== TEST 12: Add https endpoint with ssl_verify false +=== TEST 11: Add https endpoint with ssl_verify false --- config location /t { content_by_lua_block { @@ -388,7 +362,7 @@ passed -=== TEST 13: TEST for https based token verification with ssl_verify false +=== TEST 12: TEST for https based token verification with ssl_verify false --- config location /t { content_by_lua_block { @@ -418,7 +392,7 @@ Request denied: HTTP 401 Unauthorized. Body: {"error":"HTTP 401 Unauthorized"} -=== TEST 14: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values +=== TEST 13: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values --- config location /t { content_by_lua_block { @@ -460,7 +434,7 @@ passed -=== TEST 15: test for permission is empty and enforcement mode is "ENFORCING". +=== TEST 14: test for permission is empty and enforcement mode is "ENFORCING". 
--- config location /t { content_by_lua_block { @@ -485,7 +459,7 @@ GET /t -=== TEST 16: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values , access_denied_redirect_uri is "http://127.0.0.1/test" +=== TEST 15: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values , access_denied_redirect_uri is "http://127.0.0.1/test" --- config location /t { content_by_lua_block { @@ -528,7 +502,7 @@ passed -=== TEST 17: test for permission is empty and enforcement mode is "ENFORCING" , access_denied_redirect_uri is "http://127.0.0.1/test". +=== TEST 16: test for permission is empty and enforcement mode is "ENFORCING" , access_denied_redirect_uri is "http://127.0.0.1/test". --- config location /t { content_by_lua_block { @@ -555,7 +529,7 @@ Location: http://127.0.0.1/test -=== TEST 18: Add https endpoint with password_grant_token_generation_incoming_uri +=== TEST 17: Add https endpoint with password_grant_token_generation_incoming_uri --- config location /t { content_by_lua_block { @@ -629,7 +603,7 @@ true -=== TEST 19: no username or password +=== TEST 18: no username or password --- config location /t { content_by_lua_block { diff --git a/t/plugin/authz-keycloak2.t b/t/plugin/authz-keycloak2.t index 48d887449972..de6d2e5f773d 100644 --- a/t/plugin/authz-keycloak2.t +++ b/t/plugin/authz-keycloak2.t @@ -582,49 +582,7 @@ true -=== TEST 14: add plugin with lazy_load_paths and http_method_as_scope (using audience) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "authz-keycloak": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/uma2-configuration", - "audience": "course_management", - "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", - "lazy_load_paths": true, - "http_method_as_scope": true - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1982": 1 
- }, - "type": "roundrobin" - }, - "uri": "/course/foo" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed ---- no_error_log -[error] - - - -=== TEST 15: Get access token for teacher and access view course route. +=== TEST 14: Get access token for teacher and access view course route. --- config location /t { content_by_lua_block { @@ -672,7 +630,7 @@ true -=== TEST 16: Get access token for student and access view course route. +=== TEST 15: Get access token for student and access view course route. --- config location /t { content_by_lua_block { diff --git a/t/plugin/basic-auth.t b/t/plugin/basic-auth.t index 5d626edd07af..ca2a82055b90 100644 --- a/t/plugin/basic-auth.t +++ b/t/plugin/basic-auth.t @@ -340,7 +340,7 @@ GET /t ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code @@ -384,7 +384,7 @@ GET /t ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code diff --git a/t/plugin/clickhouse-logger.t b/t/plugin/clickhouse-logger.t index 5426ce028489..ccb0be11ad91 100644 --- a/t/plugin/clickhouse-logger.t +++ b/t/plugin/clickhouse-logger.t @@ -48,6 +48,18 @@ add_block_preprocessor(sub { ngx.say("ok") } } + location /clickhouse-logger/test1 { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "clickhouse body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "clickhouse headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } } _EOC_ @@ -131,7 +143,7 @@ passed } } --- response_body -property "endpoint_addr" is required +value should match only one schema, but matches none @@ -175,7 +187,49 @@ passed -=== TEST 5: access local server +=== TEST 5: add plugin on routes using multi clickhouse-logger +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addrs": ["http://127.0.0.1:10420/clickhouse-logger/test", + "http://127.0.0.1:10420/clickhouse-logger/test1"], + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 6: access local server --- request GET /opentracing --- response_body diff --git a/t/plugin/custom_sort_plugins.t b/t/plugin/custom_sort_plugins.t new file mode 100644 index 000000000000..41a23b9adbc8 --- /dev/null +++ b/t/plugin/custom_sort_plugins.t @@ -0,0 +1,633 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +no_long_string(); +no_root_location(); +log_level("info"); +run_tests; + +__DATA__ + +=== TEST 1: custom priority and default priority on different routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: verify order +--- request +GET /hello +--- response_body 
+serverless-post-function +serverless-pre-function + + + +=== TEST 3: routing without custom plugin order is not affected +--- request +GET /hello1 +--- response_body +serverless-pre-function +serverless-post-function + + + +=== TEST 4: custom priority and default priority on same route +# the priority of serverless-post-function is -2000, execute serverless-post-function first +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2001 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 6: merge plugins from consumer and route, execute the rewrite phase +# in the rewrite phase, the plugins on the route must be executed first, +# and then executed the rewrite phase of the plugins on the consumer, +# and the custom plugin order fails for this case. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: verify order(more requests) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + local headers = {} + headers["apikey"] = "auth-one" + local res, err = httpc:request_uri(uri, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local res, err = httpc:request_uri(uri, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +serverless-pre-function +serverless-post-function +serverless-pre-function +serverless-post-function + + + +=== TEST 8: merge plugins form custom and route, execute the access phase +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "access", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "access", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: verify order +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 10: merge plugins form service and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 12: custom plugins sort is not affected by plugins reload +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(0.2) + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +serverless-post-function +serverless-pre-function +done +serverless-post-function +serverless-pre-function + + + +=== TEST 13: merge plugins form plugin_configs and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + if code > 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 15: custom plugins sort on global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + 
"serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function on global rule\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function on global rule\"); + end"] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: verify order +--- request +GET /hello +--- response_body +serverless-post-function on global rule +serverless-pre-function on global rule +serverless-post-function +serverless-pre-function + + + +=== TEST 17: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/plugin/echo.t b/t/plugin/echo.t index 99aec41e62c5..71571e969d6b 100644 --- a/t/plugin/echo.t +++ b/t/plugin/echo.t @@ -211,7 +211,7 @@ Location: https://www.iresty.com end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request diff --git a/t/plugin/elasticsearch-logger.t b/t/plugin/elasticsearch-logger.t new file mode 100644 index 000000000000..2e82953f46ee --- /dev/null +++ b/t/plugin/elasticsearch-logger.t @@ -0,0 +1,453 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local ok, err + local configs = { + -- full configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services", + type = "collector" + }, + auth = { + username = "elastic", + password = "123456" + }, + ssl_verify = false, + timeout = 60, + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 2, + batch_max_size = 10, + }, + -- minimize configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + } + }, + -- property "endpoint_addr" is required + { + field = { + index = "services" + } + }, + -- property "field" is required + { + endpoint_addr = "http://127.0.0.1:9200", + }, + -- property "index" is required + { + endpoint_addr = "http://127.0.0.1:9200", + field = {} + }, + -- property "endpoint" must not end with "/" + { + endpoint_addr = "http://127.0.0.1:9200/", + field = { + index = "services" + } + } + } + + local 
plugin = require("apisix.plugins.elasticsearch-logger") + for i = 1, #configs do + ok, err = plugin.check_schema(configs[i]) + if err then + ngx.say(err) + else + ngx.say("passed") + end + end + } + } +--- response_body_like +passed +passed +property "endpoint_addr" is required +property "field" is required +property "field" validation failed: property "index" is required +property "endpoint_addr" validation failed: failed to match pattern "\[\^/\]\$" with "http://127.0.0.1:9200/" + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: test route (success write) +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.get_full_log = function(ngx, conf) + return { + test = "test" + } + end + + http.request_uri = function(self, uri, params) + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_full_log(ngx, {}) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + 
core.log.error("check elasticsearch full log body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +check elasticsearch full log body success + + + +=== TEST 4: set route (auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries + + + +=== TEST 6: set route (no auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test route (no auth, failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] failed to process entries: elasticsearch server returned status: 401 +"reason":"missing authentication credentials for REST request [/_bulk]" 
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count + + + +=== TEST 8: set route (error auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "111111" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: test route (error auth failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] failed to process entries +Batch Processor[elasticsearch-logger] exceeded the max_retry_count + + + +=== TEST 10: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_PUT, [[{ + "log_format": { + "custom_host": "$host", + "custom_timestamp": "$time_iso8601", + "custom_client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body_like +passed +passed + + + +=== TEST 11: hit route and check custom elasticsearch logger +--- extra_init_by_lua + local core = 
require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.get_custom_format_log = function(ctx, format) + return { + test = "test" + } + end + + http.request_uri = function(self, uri, params) + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_custom_format_log(nil, nil) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch custom body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- response_body +hello world +--- wait: 2 +--- error_log +check elasticsearch custom body success diff --git a/t/plugin/example.t b/t/plugin/example.t index 985aa11f111b..21972d290a08 100644 --- a/t/plugin/example.t +++ b/t/plugin/example.t @@ -165,12 +165,15 @@ GET /t --- response_body plugin name: example-plugin priority: 0 --- yaml_config -etcd: - host: - - "http://127.0.0.1:2379" # etcd address - prefix: "/apisix" # apisix configurations prefix - timeout: 1 - +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 plugins: - example-plugin - not-exist-plugin diff --git a/t/plugin/ext-plugin/sanity-openresty-1-19.t b/t/plugin/ext-plugin/sanity2.t similarity index 86% rename from t/plugin/ext-plugin/sanity-openresty-1-19.t rename to t/plugin/ext-plugin/sanity2.t index c33d86e007fb..206e7b090c2d 100644 --- a/t/plugin/ext-plugin/sanity-openresty-1-19.t +++ b/t/plugin/ext-plugin/sanity2.t @@ -14,16 +14,7 @@ # See the License for the 
specific language governing permissions and # limitations under the License. # -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version =~ m/\/1.17.8/) { - plan(skip_all => "require OpenResty 1.19+"); -} else { - plan('no_plan'); -} +use t::APISIX 'no_plan'; repeat_each(1); no_long_string(); diff --git a/t/plugin/google-cloud-logging2.t b/t/plugin/google-cloud-logging2.t index 4b52ebc6a8ea..5c60775de4b7 100644 --- a/t/plugin/google-cloud-logging2.t +++ b/t/plugin/google-cloud-logging2.t @@ -58,24 +58,8 @@ __DATA__ } } - local expected = { - node = { - value = { - plugins = { - ["google-cloud-logging"] = { - max_retry_count = 0, - retry_delay = 1, - buffer_duration = 60, - batch_max_size = 1000, - inactive_timeout = 5, - } - } - } - } - } - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config, expected) + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) if code >= 300 then ngx.status = code diff --git a/t/plugin/grpc-transcode.t b/t/plugin/grpc-transcode.t index 78baac0ff73b..a005c0113096 100644 --- a/t/plugin/grpc-transcode.t +++ b/t/plugin/grpc-transcode.t @@ -42,7 +42,7 @@ __DATA__ content_by_lua_block { local t = require("lib.test_admin").test local etcd = require("apisix.core.etcd") - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -64,7 +64,7 @@ __DATA__ end ngx.say(body) - local res = assert(etcd.get('/proto/1')) + local res = assert(etcd.get('/protos/1')) local create_time = res.body.node.value.create_time assert(create_time ~= nil, "create_time is nil") local update_time = res.body.node.value.update_time @@ -86,7 +86,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = 
t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -123,7 +123,7 @@ passed location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = t('/apisix/admin/protos/2', ngx.HTTP_DELETE ) @@ -317,7 +317,7 @@ Connection refused) while connecting to upstream location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -562,7 +562,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -606,7 +606,6 @@ passed [[{ "methods": ["GET", "POST"], "uri": "/grpctest", - "service_protocol": "grpc", "plugins": { "grpc-transcode": { "proto_id": "1", @@ -669,7 +668,7 @@ GET /t location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -771,7 +770,6 @@ failed to encode request data to protobuf [[{ "methods": ["GET", "POST"], "uri": "/grpctest", - "service_protocol": "grpc", "plugins": { "grpc-transcode": { "proto_id": "1", diff --git a/t/plugin/grpc-transcode2.t b/t/plugin/grpc-transcode2.t index 7c8286650f50..3bdede0999a4 100644 --- a/t/plugin/grpc-transcode2.t +++ b/t/plugin/grpc-transcode2.t @@ -41,7 +41,7 @@ __DATA__ location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -135,7 +135,7 @@ Content-Type: application/json location /t { content_by_lua_block { local t = 
require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/2', + local code, body = t('/apisix/admin/protos/2', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -240,7 +240,7 @@ failed to encode request data to protobuf local content = t.read_file("t/grpc_server_example/proto.pb") local data = {content = ngx.encode_base64(content)} - local code, body = t.test('/apisix/admin/proto/1', + local code, body = t.test('/apisix/admin/protos/1', ngx.HTTP_PUT, json.encode(data) ) @@ -388,7 +388,7 @@ Undefined service method location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -500,12 +500,8 @@ qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003\\u0000 content_by_lua_block { local t = require("lib.test_admin").test local code, message = t('/apisix/admin/global_rules/1', - ngx.HTTP_DELETE, - nil, - [[{ - "action": "delete" - }]] - ) + ngx.HTTP_DELETE + ) ngx.say("[delete] code: ", code, " message: ", message) } } @@ -581,7 +577,7 @@ qr/request log: \{.*body":\"\{\\"result\\":3}/ content_by_lua_block { local http = require "resty.http" local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; @@ -700,7 +696,7 @@ set protobuf option: int64_as_string content_by_lua_block { local http = require "resty.http" local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/proto/1', + local code, body = t('/apisix/admin/protos/1', ngx.HTTP_PUT, [[{ "content" : "syntax = \"proto3\"; diff --git a/t/plugin/grpc-transcode3.t b/t/plugin/grpc-transcode3.t new file mode 100644 index 000000000000..97208788ab1b --- /dev/null +++ b/t/plugin/grpc-transcode3.t @@ -0,0 +1,124 @@ +# +# Licensed to the Apache Software Foundation (ASF) 
under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set rule +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {} + } + + enum Gender { + GENDER_UNKNOWN = 0; + GENDER_MALE = 1; + GENDER_FEMALE = 2; + } + + message Person { + string name = 1; + int32 age = 2; + } + + message MultipleHelloRequest { + string name = 1; + repeated string items = 2; + repeated Gender genders = 3; + repeated Person persons = 4; + } + + message MultipleHelloReply{ + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": 
["POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayMultipleHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route +--- request +POST /grpctest +{"name":"world","persons":[{"name":"Joe","age":1},{"name":"Jake","age":2}]} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"message":"Hello world, name: Joe, age: 1, name: Jake, age: 2"} diff --git a/t/plugin/hmac-auth2.t b/t/plugin/hmac-auth2.t index 4358ef0f8cb8..a845f3de95fd 100644 --- a/t/plugin/hmac-auth2.t +++ b/t/plugin/hmac-auth2.t @@ -400,7 +400,7 @@ x-real-ip: 127.0.0.1 ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code @@ -436,7 +436,7 @@ x-real-ip: 127.0.0.1 ngx.HTTP_GET, nil, [[ -{"properties":{"disable":{"type":"boolean"}},"title":"work with route or service object","type":"object"} +{"properties":{},"title":"work with route or service object","type":"object"} ]] ) ngx.status = code diff --git a/t/plugin/http-logger2.t b/t/plugin/http-logger2.t index 688bc8bc3e01..f99c11483d9d 100644 --- a/t/plugin/http-logger2.t +++ b/t/plugin/http-logger2.t @@ -42,6 +42,13 @@ add_block_preprocessor(sub { } } + location /http-logger/Authorization { + content_by_lua_block { + ngx.log(ngx.WARN, "received Authorization header: [", ngx.var.http_authorization, "]") + ngx.say("OK") + } + } + location /http-logger/center { content_by_lua_block { local function str_split(str, reps) @@ -253,3 +260,52 @@ test-http-logger-response request.body:test-http-logger-request response.body:test-http-logger-response --- wait: 1.5 + + + +=== TEST 
8: test default Authorization header sent to the log server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["POST"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/Authorization", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + "type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- request +POST /http-logger/test +test-http-logger-request +--- error_log +received Authorization header: [nil] +--- wait: 1.5 diff --git a/t/plugin/ip-restriction.t b/t/plugin/ip-restriction.t index d2cbc75c0325..d32c8b2e41a4 100644 --- a/t/plugin/ip-restriction.t +++ b/t/plugin/ip-restriction.t @@ -587,7 +587,9 @@ qr/failed to validate item 1: object matches none of the required/ "blacklist": [ "127.0.0.0/24" ], - "disable": true + "_meta": { + "disable": true + } } } }]] diff --git a/t/plugin/jwt-auth.t b/t/plugin/jwt-auth.t index 197a4259453f..81cf8c935834 100644 --- a/t/plugin/jwt-auth.t +++ b/t/plugin/jwt-auth.t @@ -54,7 +54,7 @@ __DATA__ } } --- response_body_like eval -qr/{"algorithm":"HS256","base64_secret":false,"exp":86400,"key":"123","secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ +qr/{"algorithm":"HS256","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ @@ -498,7 +498,7 @@ property "key" is required local code, body, raw = t('/apisix/admin/schema/plugins/jwt-auth?schema_type=consumer', ngx.HTTP_GET, [[ 
-{"dependencies":{"algorithm":{"oneOf":[{"properties":{"algorithm":{"default":"HS256","enum":["HS256","HS512"]}}},{"required":["public_key","private_key"],"properties":{"algorithm":{"enum":["RS256"]},"public_key":{"type":"string"},"private_key":{"type":"string"}}}]}},"required":["key"],"type":"object","properties":{"base64_secret":{"default":false,"type":"boolean"},"secret":{"type":"string"},"algorithm":{"enum":["HS256","HS512","RS256"],"default":"HS256","type":"string"},"exp":{"minimum":1,"default":86400,"type":"integer"},"key":{"type":"string"}}} +{"dependencies":{"algorithm":{"oneOf":[{"properties":{"algorithm":{"default":"HS256","enum":["HS256","HS512"]}}},{"required":["public_key","private_key"],"properties":{"algorithm":{"enum":["RS256","ES256"]},"public_key":{"type":"string"},"private_key":{"type":"string"}}}]}},"required":["key"],"type":"object","properties":{"base64_secret":{"default":false,"type":"boolean"},"secret":{"type":"string"},"algorithm":{"enum":["HS256","HS512","RS256","ES256"],"default":"HS256","type":"string"},"exp":{"minimum":1,"default":86400,"type":"integer"},"key":{"type":"string"}}} ]] ) @@ -517,7 +517,7 @@ property "key" is required ngx.HTTP_GET, nil, [[ - {"properties":{"disable":{"type":"boolean"}},"type":"object"} + {"properties":{},"type":"object"} ]] ) ngx.status = code @@ -535,7 +535,7 @@ property "key" is required ngx.HTTP_GET, nil, [[ - {"properties":{"disable":{"type":"boolean"}},"type":"object"} + {"properties":{},"type":"object"} ]] ) ngx.status = code @@ -957,7 +957,7 @@ qr/failed to sign jwt/ } } --- response_body_like eval -qr/{"algorithm":"HS512","base64_secret":false,"exp":86400,"key":"123","secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ +qr/{"algorithm":"HS512","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ @@ -1083,7 +1083,7 @@ hello world content_by_lua_block { local plugin = require("apisix.plugins.jwt-auth") local core = require("apisix.core") - local conf = {key = 
"123", algorithm = "ES256"} + local conf = {key = "123", algorithm = "ES512"} local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) if not ok then @@ -1126,7 +1126,7 @@ base64_secret required but the secret is not in base64 format location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body, res_data = t('/apisix/admin/consumers', + local code, body, res = t('/apisix/admin/consumers', ngx.HTTP_PUT, [[{ "username": "kerouac", @@ -1136,29 +1136,18 @@ base64_secret required but the secret is not in base64 format "secret": "my-secret-key" } } - }]], - [[{ - "node": { - "value": { - "username": "kerouac", - "plugins": { - "jwt-auth": { - "key": "exp-not-set", - "secret": "my-secret-key" - } - } - } - }, - "action": "set" }]] - ) + ) + + res = require("toolkit.json").decode(res) + assert(res.value.plugins["jwt-auth"].exp == 86400) ngx.status = code - ngx.say(require("toolkit.json").encode(res_data)) + ngx.say(body) } } ---- response_body_like eval -qr/"exp":86400/ +--- response_body +passed @@ -1235,3 +1224,94 @@ qr/failed to validate dependent schema for \\"algorithm\\"/ --- error_code: 400 --- response_body_like eval qr/failed to validate dependent schema for \\"algorithm\\"/ + + + +=== TEST 52: add consumer with username and plugins with public_key, private_key(ES256) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-es256", + "algorithm": "ES256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEVs/o5+uQbTjL3chynL4wXgUg2R9\nq9UU8I5mEovUf86QZ7kOBIjJwqnzD1omageEHWwHdBO6B+dFabmdT9POxg==\n-----END PUBLIC KEY-----", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgevZzL1gdAFr88hb2\nOF/2NxApJCzGCEDdfSp6VQO30hyhRANCAAQRWz+jn65BtOMvdyHKcvjBeBSDZH2r\n1RTwjmYSi9R/zpBnuQ4EiMnCqfMPWiZqB4QdbAd0E7oH50VpuZ1P087G\n-----END PRIVATE KEY-----" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 53: JWT sign and verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 54: sign/verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, sign = t('/apisix/plugin/jwt/sign?key=user-key-es256', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world diff --git a/t/plugin/jwt-auth2.t b/t/plugin/jwt-auth2.t index a6c6f31ad7e7..6225133878a6 100644 --- a/t/plugin/jwt-auth2.t +++ b/t/plugin/jwt-auth2.t @@ -147,3 +147,322 @@ GET /hello jwt-header: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs --- response_body hello world + + + +=== TEST 8: use lifetime_grace_period default value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANW16kX5SMrMa2t7F2R1w6Bk/qpjS4QQ\n]] .. + [[hnrbED3Dpsl9JXAx90MYsIWp51hBxJSE/EPVK8WF/sjHK1xQbEuDfEECAwEAAQ==\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. 
+ "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7w", + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- resgiter jwt sign api + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- get JWT token + local code, err, sign = t('/apisix/plugin/jwt/sign?key=test-jwt-a', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + + -- the JWT has not expired, so it should be valid + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- after 1.1 seconds, the JWT should be expired, because the exp is only 1 second + ngx.sleep(1.1) + res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- error_code: 401 +--- response_body eval +qr/failed to verify jwt/ +--- error_log eval +qr/ailed to verify jwt: 'exp' claim expired at/ + + + +=== TEST 9: lifetime_grace_period is 2 seconds +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANW16kX5SMrMa2t7F2R1w6Bk/qpjS4QQ\n]] .. + [[hnrbED3Dpsl9JXAx90MYsIWp51hBxJSE/EPVK8WF/sjHK1xQbEuDfEECAwEAAQ==\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. + "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7w", + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a", + "lifetime_grace_period": 2 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- resgiter jwt sign api + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- get JWT token + local code, err, sign = t('/apisix/plugin/jwt/sign?key=test-jwt-a', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + + -- after 1.1 seconds, since lifetime_grace_period is 2 seconds, + -- so the JWT has not expired, it should be valid + ngx.sleep(1.1) + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- response_body +hello world diff --git a/t/plugin/key-auth.t b/t/plugin/key-auth.t index a3483573a7d2..33e86fe19d13 100644 --- a/t/plugin/key-auth.t +++ b/t/plugin/key-auth.t @@ -188,7 +188,7 @@ GET /hello code, body = t('/apisix/admin/consumers', ngx.HTTP_PUT, string.format('{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}', username, key), - string.format('{"node":{"value":{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}},"action":"set"}', username, key) + string.format('{"value":{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}}', username, key) ) end diff --git a/t/plugin/ldap-auth.t b/t/plugin/ldap-auth.t index 9ecac330f948..31b7a643013e 100644 --- a/t/plugin/ldap-auth.t +++ b/t/plugin/ldap-auth.t @@ -202,6 +202,8 @@ Authorization: Basic Zm9vOmZvbwo= --- error_code: 401 --- response_body {"message":"Invalid user authorization"} +--- error_log +The supplied credential is invalid @@ -302,7 +304,7 @@ find consumer user01 ngx.HTTP_GET, nil, [[ -{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"disable":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} +{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] ) ngx.status = code @@ -338,8 +340,107 @@ find consumer user01 ngx.HTTP_GET, nil, [[ -{"title":"work with route or service 
object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"disable":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] +{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] ) ngx.status = code } } + + + +=== TEST 17: enable ldap-auth with tls +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "localhost:1636", + "uid": "cn", + "use_tls": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 19: enable ldap-auth with tls, verify CA +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "localhost:1636", + "uid": "cn", + "use_tls": true, + "tls_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- 
response_body +hello world +--- error_log +find consumer user01 diff --git a/t/plugin/limit-count2.t b/t/plugin/limit-count2.t index 621edad8a912..0dadaf78e990 100644 --- a/t/plugin/limit-count2.t +++ b/t/plugin/limit-count2.t @@ -764,3 +764,42 @@ limit key: afafafhao2:remote_addr limit key: afafafhao2:remote_addr --- response_body [200,200,503,503] + + + +=== TEST 22: group with disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "abcd", + "_meta": { + "disable": false + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/plugin/limit-count3.t b/t/plugin/limit-count3.t index 4298a20bd604..0c5490616ad2 100644 --- a/t/plugin/limit-count3.t +++ b/t/plugin/limit-count3.t @@ -224,3 +224,64 @@ passed } --- response_body [200,200] + + + +=== TEST 7: set another route with the same conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 61 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: avoid sharing the same counter +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello1" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200] diff --git a/t/plugin/log-rotate2.t b/t/plugin/log-rotate2.t index 1a28f33e8829..617a29b5a7d2 100644 --- a/t/plugin/log-rotate2.t +++ b/t/plugin/log-rotate2.t @@ -61,7 +61,7 @@ __DATA__ location /t { content_by_lua_block { ngx.log(ngx.ERR, "start xxxxxx") - ngx.sleep(2.5) + ngx.sleep(3.5) local has_split_access_file = false local has_split_error_file = false local lfs = require("lfs") @@ -105,7 +105,7 @@ start xxxxxx --- config location /t { content_by_lua_block { - ngx.sleep(2) + ngx.sleep(3) local default_logs = {} for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do diff --git a/t/plugin/log-rotate3.t b/t/plugin/log-rotate3.t new file mode 100644 index 000000000000..bfab0f9b63e9 --- /dev/null +++ b/t/plugin/log-rotate3.t @@ -0,0 +1,141 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + admin_key: ~ +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 86400 + max_size: 9 + max_kept: 3 + enable_compression: false +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: log rotate by max_size +--- config + location /t { + content_by_lua_block { + ngx.log(ngx.ERR, "start xxxxxx") + ngx.sleep(2) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__access.log$") then + has_split_access_file = true + end + + if string.match(file_name, "__error.log$") then + has_split_error_file = true + end + end + + if not has_split_access_file and has_split_error_file then + ngx.status = 200 + else + ngx.status = 500 + end + } + } + + + +=== TEST 2: in current log +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + ngx.log(ngx.WARN, "start xxxxxx") + ngx.say("done") + } + } +--- response_body +done +--- error_log +start xxxxxx + + + +=== TEST 3: check file changes +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + + local default_logs = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log$") or string.match(file_name, "__access.log$") then + local filepath = ngx.config.prefix() .. "/logs/" .. 
file_name + local attr = lfs.attributes(filepath) + if attr then + default_logs[filepath] = { change = attr.change, size = attr.size } + end + end + end + + ngx.sleep(1) + + local passed = false + for filepath, origin_attr in pairs(default_logs) do + local check_attr = lfs.attributes(filepath) + if check_attr.change == origin_attr.change and check_attr.size == origin_attr.size then + passed = true + else + passed = false + break + end + end + + if passed then + ngx.say("passed") + end + } + } +--- response_body +passed diff --git a/t/plugin/opa2.t b/t/plugin/opa2.t index 75d9632ba26a..d14269ce696c 100644 --- a/t/plugin/opa2.t +++ b/t/plugin/opa2.t @@ -56,7 +56,9 @@ __DATA__ "username": "test", "plugins": { "key-auth": { - "disable": false, + "_meta": { + "disable": false + }, "key": "test-key" } } @@ -68,7 +70,9 @@ __DATA__ "name": "s1", "plugins": { "key-auth": { - "disable": false + "_meta": { + "disable": false + } } } }]], diff --git a/t/plugin/openfunction.t b/t/plugin/openfunction.t new file mode 100644 index 000000000000..8da960df4d40 --- /dev/null +++ b/t/plugin/openfunction.t @@ -0,0 +1,335 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({function_uri = "http://127.0.0.1:30585/default/test-body"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({timeout = 60000}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" is required + + + +=== TEST 3: wrong type for `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({function_uri = 30858}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" validation failed: wrong type: expected string, got number + + + +=== TEST 4: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/function-sample" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit route (with GET request) +--- request +GET /hello +--- response_body +Hello, function-sample! 
+ + + +=== TEST 6: reset route with test-body function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30585/default/test-body" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route with POST method +--- request +POST /hello +test +--- response_body +Hello, test! + + + +=== TEST 8: reset route with test-header function with service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/", + "authorization": { + "service_token": "test:test" + } + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route with POST request with service_token +--- request +POST /hello +--- response_body chomp +[Basic dGVzdDp0ZXN0] + + + +=== TEST 10: reset route with test-header function without service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit route with user-specific Authorization header +--- request +POST /hello +--- more_headers +authorization: 
user-token-xxx +--- response_body chomp +[user-token-xxx] + + + +=== TEST 12: reset route to non-existent function_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/default/non-existent" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route (with non-existent function_uri) +--- request +POST /hello +test +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 404 +--- response_body_like eval +qr/not found/ + + + +=== TEST 14: reset route with test-uri function and path forwarding +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit route with GET method +--- request +GET /hello/openfunction +--- response_body +Hello, openfunction! 
diff --git a/t/plugin/openid-connect.t b/t/plugin/openid-connect.t index 22786eaea9f2..9337e4235d18 100644 --- a/t/plugin/openid-connect.t +++ b/t/plugin/openid-connect.t @@ -109,7 +109,8 @@ done "redirect_uri": "https://iresty.com", "ssl_verify": false, "timeout": 10, - "scope": "apisix" + "scope": "apisix", + "use_pkce": false } }, "upstream": { @@ -918,7 +919,7 @@ OIDC introspection failed: invalid token --- request GET /t --- response_body -{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3} +{"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","introspection_endpoint_auth_method":"client_secret_basic","logout_path":"/logout","realm":"apisix","scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3,"use_pkce":false} --- no_error_log [error] @@ -1185,3 +1186,80 @@ GET /t http://127.0.0.1:.*/hello --- no_error_log [error] + + + +=== TEST 30: Switch route URI back to `/hello` and enable pkce. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "use_pkce": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 31: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider with code_challenge parameters. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 and + string.match(location, '.*code_challenge=.*') and + string.match(location, '.*code_challenge_method=S256.*') then + ngx.say(true) + end + } + } +--- request +GET /t +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- no_error_log +[error] diff --git a/t/plugin/opentelemetry.t b/t/plugin/opentelemetry.t index 0c142667a1ab..759b248c6a80 100644 --- a/t/plugin/opentelemetry.t +++ b/t/plugin/opentelemetry.t @@ -553,6 +553,7 @@ plugin_attr: --- extra_init_by_lua local core = require("apisix.core") local otlp = require("opentelemetry.trace.exporter.otlp") + local span_kind = require("opentelemetry.trace.span_kind") otlp.export_spans = function(self, spans) if (#spans ~= 1) then ngx.log(ngx.ERR, "unexpected spans length: ", #spans) @@ -565,6 +566,12 @@ plugin_attr: return end + local current_span_kind = span:plain().kind + if current_span_kind ~= span_kind.server then + ngx.log(ngx.ERR, "expected span.kind to be server but got ", current_span_kind) + return + end + if span.name ~= "/opentracing?foo=bar&a=b" then ngx.log(ngx.ERR, "expect span name: /opentracing?foo=bar&a=b, but got ", span.name) return diff --git a/t/plugin/opentelemetry2.t b/t/plugin/opentelemetry2.t index f173d125ba91..2495d8ef2adf 100644 --- a/t/plugin/opentelemetry2.t +++ b/t/plugin/opentelemetry2.t @@ -142,3 +142,88 @@ plugin body_filter phase opentelemetry context current opentelemetry context current opentelemetry export span + + + +=== TEST 3: set additional_attributes with match +--- config + location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_header_prefix_attributes": [ + "x-my-header-*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/attributes" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: opentelemetry expands headers +--- extra_init_by_lua + local otlp = require("opentelemetry.trace.exporter.otlp") + otlp.export_spans = function(self, spans) + if (#spans ~= 1) then + ngx.log(ngx.ERR, "unexpected spans length: ", #spans) + return + end + + local attributes_names = {} + local attributes = {} + local span = spans[1] + for _, attribute in ipairs(span.attributes) do + if attribute.key == "hostname" then + -- remove any randomness + goto skip + end + table.insert(attributes_names, attribute.key) + attributes[attribute.key] = attribute.value.string_value or "" + ::skip:: + end + table.sort(attributes_names) + for _, attribute in ipairs(attributes_names) do + ngx.log(ngx.INFO, "attribute " .. attribute .. ": \"" .. attributes[attribute] .. 
"\"") + end + + ngx.log(ngx.INFO, "opentelemetry export span") + end +--- request +GET /attributes +--- more_headers +x-my-header-name: william +x-my-header-nick: bill +--- wait: 1 +--- error_code: 404 +--- grep_error_log eval +qr/attribute .+?:.[^,]*/ +--- grep_error_log_out +attribute route: "route_name" +attribute service: "" +attribute x-my-header-name: "william" +attribute x-my-header-nick: "bill" diff --git a/t/plugin/openwhisk.t b/t/plugin/openwhisk.t index 4d89bbe57b9b..0d7e73a96b23 100644 --- a/t/plugin/openwhisk.t +++ b/t/plugin/openwhisk.t @@ -244,7 +244,7 @@ qr/"error":"The requested resource does not exist."/ [[{ "plugins": { "openwhisk": { - "api_host": "http://127.0.0.0:3233", + "api_host": "http://127.0.0.1:1979", "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", "namespace": "guest", "action": "non-existent" diff --git a/t/plugin/plugin.t b/t/plugin/plugin.t index e45f5d5f7814..5baaed8011a6 100644 --- a/t/plugin/plugin.t +++ b/t/plugin/plugin.t @@ -284,7 +284,7 @@ GET /hello error_response = "OK" }}, }) do - local code, body = t('/apisix/admin/global_rules/1', + local code, body = t('/apisix/admin/plugin_configs/1', ngx.HTTP_PUT, { plugins = { @@ -306,3 +306,222 @@ GET /hello {"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: wrong type: expected object, got boolean"} {"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"error_response\" validation failed: value should match only one schema, but matches none"} passed + + + +=== TEST 10: invalid _meta filter vars schema with wrong type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = "arg_k == v" + } + } + } + } + ) + if code 
>= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"filter\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 11: invalid _meta filter schema with wrong expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for _, filter in ipairs({ + {"arg_name", "==", "json"}, + { + {"arg_name", "*=", "json"} + } + }) do + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = filter + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + end + } + } +--- response_body +{"error_msg":"failed to validate the 'vars' expression: rule should be wrapped inside brackets"} +{"error_msg":"failed to validate the 'vars' expression: invalid operator '*='"} + + + +=== TEST 12: proxy-rewrite plugin run with _meta filter vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v2"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v2" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 13: hit route: run proxy-rewrite plugin +--- request +GET /hello?version=v2 +--- response_headers +x-api-version: v2 + + + +=== TEST 14: hit route: not run proxy-rewrite plugin +--- request +GET /hello?version=v1 +--- response_body +hello world + + + +=== TEST 15: different route,same plugin, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v3"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v3" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello1" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 16: hit route: run proxy-rewrite plugin +--- request +GET /hello1?version=v3 +--- response_headers +x-api-version: v3 + + + +=== TEST 17: same plugin, same id between routes and global_rules, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v4"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v4" + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 18: hit route: run proxy-rewrite plugin +--- request +GET /hello1?version=v4 +--- response_headers +x-api-version: v4 diff --git a/t/plugin/prometheus2.t b/t/plugin/prometheus2.t index a2715d6d6d10..178751e1f224 100644 --- a/t/plugin/prometheus2.t +++ b/t/plugin/prometheus2.t @@ -352,9 +352,9 @@ GET /apisix/prometheus/metrics "syslog": { "host": "127.0.0.1", "include_req_body": false, - "max_retry_times": 1, + "max_retry_count": 1, "tls": false, - "retry_interval": 1, + "retry_delay": 1, "batch_max_size": 1000, "buffer_duration": 60, "port": 1000, @@ -914,3 +914,14 @@ GET /hello GET /apisix/prometheus/metrics --- response_body eval qr/apisix_bandwidth\{type="egress",route="1",service="service_name",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 50: fetch the 
prometheus shared dict data +--- http_config +lua_shared_dict test-shared-dict 10m; +--- request +GET /apisix/prometheus/metrics +--- response_body_like +.*apisix_shared_dict_capacity_bytes\{name="test-shared-dict"\} 10485760(?:.|\n)* +apisix_shared_dict_free_space_bytes\{name="test-shared-dict"\} \d+.* diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t new file mode 100644 index 000000000000..93302028e68b --- /dev/null +++ b/t/plugin/prometheus4.t @@ -0,0 +1,140 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/metrics', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request +--- yaml_config +plugin_attr: + prometheus: + metrics: + bandwidth: + extra_labels: + - upstream_addr: $upstream_addr + - upstream_status: $upstream_status +--- request +GET /hello + + + +=== TEST 4: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="10",service="",consumer="",node="127.0.0.1",upstream_addr="127.0.0.1:1980",upstream_status="200"\} \d+/ + + + +=== TEST 5: client request, label with nonexist ngx variable +--- yaml_config +plugin_attr: + prometheus: + metrics: + http_status: + extra_labels: + - dummy: $dummy +--- request +GET /hello 
+ + + +=== TEST 6: fetch the prometheus metric data, with nonexist ngx variable +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="200",route="10",matched_uri="\/hello",matched_host="",service="",consumer="",node="127.0.0.1",dummy=""\} \d+/ diff --git a/t/plugin/proxy-rewrite.t b/t/plugin/proxy-rewrite.t index fbca1b621b51..076f236afba3 100644 --- a/t/plugin/proxy-rewrite.t +++ b/t/plugin/proxy-rewrite.t @@ -41,8 +41,7 @@ __DATA__ local plugin = require("apisix.plugins.proxy-rewrite") local ok, err = plugin.check_schema({ uri = '/apisix/home', - host = 'apisix.iresty.com', - scheme = 'http' + host = 'apisix.iresty.com' }) if not ok then ngx.say(err) @@ -60,34 +59,7 @@ done -=== TEST 2: wrong value of key ---- config - location /t { - content_by_lua_block { - local plugin = require("apisix.plugins.proxy-rewrite") - local ok, err = plugin.check_schema({ - uri = '/apisix/home', - host = 'apisix.iresty.com', - scheme = 'tcp' - }) - if not ok then - ngx.say(err) - end - - ngx.say("done") - } - } ---- request -GET /t ---- response_body -property "scheme" validation failed: matches none of the enum values -done ---- no_error_log -[error] - - - -=== TEST 3: add plugin +=== TEST 2: add plugin --- config location /t { content_by_lua_block { @@ -98,7 +70,6 @@ done "plugins": { "proxy-rewrite": { "uri": "/test/add", - "scheme": "https", "host": "apisix.iresty.com" } }, @@ -127,7 +98,7 @@ passed -=== TEST 4: update plugin +=== TEST 3: update plugin --- config location /t { content_by_lua_block { @@ -138,7 +109,6 @@ passed "plugins": { "proxy-rewrite": { "uri": "/test/update", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -167,7 +137,7 @@ passed -=== TEST 5: disable plugin +=== TEST 4: disable plugin --- config location /t { content_by_lua_block { @@ -202,7 +172,7 @@ passed -=== TEST 6: set route(rewrite host) +=== TEST 5: set route(rewrite host) --- config location /t { content_by_lua_block { @@ -214,7 +184,6 @@ passed 
"plugins": { "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -243,7 +212,7 @@ passed -=== TEST 7: rewrite host +=== TEST 6: rewrite host --- request GET /hello HTTP/1.1 --- response_body @@ -255,7 +224,7 @@ scheme: http -=== TEST 8: set route(rewrite host + scheme) +=== TEST 7: set route(rewrite host + upstream scheme is https) --- config location /t { content_by_lua_block { @@ -267,11 +236,11 @@ scheme: http "plugins": { "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", - "scheme": "https", "host": "test.com" } }, "upstream": { + "scheme": "https", "nodes": { "127.0.0.1:1983": 1 }, @@ -296,7 +265,7 @@ passed -=== TEST 9: rewrite host + scheme +=== TEST 8: rewrite host + upstream scheme is https --- request GET /hello HTTP/1.1 --- response_body @@ -308,7 +277,7 @@ scheme: https -=== TEST 10: set route(rewrite headers) +=== TEST 9: set route(rewrite headers) --- config location /t { content_by_lua_block { @@ -349,7 +318,7 @@ passed -=== TEST 11: rewrite headers +=== TEST 10: rewrite headers --- request GET /hello HTTP/1.1 --- more_headers @@ -364,7 +333,7 @@ x-real-ip: 127.0.0.1 -=== TEST 12: set route(add headers) +=== TEST 11: set route(add headers) --- config location /t { content_by_lua_block { @@ -405,7 +374,7 @@ passed -=== TEST 13: add headers +=== TEST 12: add headers --- request GET /hello HTTP/1.1 --- response_body @@ -418,7 +387,7 @@ x-real-ip: 127.0.0.1 -=== TEST 14: set route(rewrite empty headers) +=== TEST 13: set route(rewrite empty headers) --- config location /t { content_by_lua_block { @@ -459,7 +428,7 @@ passed -=== TEST 15: rewrite empty headers +=== TEST 14: rewrite empty headers --- request GET /hello HTTP/1.1 --- more_headers @@ -474,7 +443,7 @@ x-real-ip: 127.0.0.1 -=== TEST 16: set route(rewrite uri args) +=== TEST 15: set route(rewrite uri args) --- config location /t { content_by_lua_block { @@ -512,7 +481,7 @@ passed -=== TEST 17: rewrite uri args +=== TEST 16: rewrite uri args 
--- request GET /hello?q=apisix&a=iresty HTTP/1.1 --- response_body @@ -524,7 +493,7 @@ q: apisix -=== TEST 18: set route(rewrite uri empty args) +=== TEST 17: set route(rewrite uri empty args) --- config location /t { content_by_lua_block { @@ -562,7 +531,7 @@ passed -=== TEST 19: rewrite uri empty args +=== TEST 18: rewrite uri empty args --- request GET /hello HTTP/1.1 --- response_body @@ -572,7 +541,7 @@ uri: /plugin_proxy_rewrite_args -=== TEST 20: remove header +=== TEST 19: remove header --- config location /t { content_by_lua_block { @@ -614,7 +583,7 @@ passed -=== TEST 21: remove header +=== TEST 20: remove header --- request GET /hello HTTP/1.1 --- more_headers @@ -630,7 +599,7 @@ x-real-ip: 127.0.0.1 -=== TEST 22: set route(only using regex_uri) +=== TEST 21: set route(only using regex_uri) --- config location /t { content_by_lua_block { @@ -668,7 +637,7 @@ passed -=== TEST 23: hit route(rewrite uri using regex_uri) +=== TEST 22: hit route(rewrite uri using regex_uri) --- request GET /test/plugin/proxy/rewrite HTTP/1.1 --- response_body @@ -680,7 +649,7 @@ scheme: http -=== TEST 24: hit route(404 not found) +=== TEST 23: hit route(404 not found) --- request GET /test/not/found HTTP/1.1 --- error_code: 404 @@ -689,7 +658,7 @@ GET /test/not/found HTTP/1.1 -=== TEST 25: set route(Using both uri and regex_uri) +=== TEST 24: set route(Using both uri and regex_uri) --- config location /t { content_by_lua_block { @@ -728,7 +697,7 @@ passed -=== TEST 26: hit route(rewrite uri using uri & regex_uri property) +=== TEST 25: hit route(rewrite uri using uri & regex_uri property) --- request GET /test/hello HTTP/1.1 --- response_body @@ -738,7 +707,7 @@ hello world -=== TEST 27: set route(invalid regex_uri) +=== TEST 26: set route(invalid regex_uri) --- config location /t { content_by_lua_block { @@ -776,7 +745,7 @@ GET /t -=== TEST 28: set route(invalid regex syntax for the first element) +=== TEST 27: set route(invalid regex syntax for the first element) --- config 
location /t { content_by_lua_block { @@ -816,7 +785,7 @@ qr/invalid regex_uri/ -=== TEST 29: set route(invalid regex syntax for the second element) +=== TEST 28: set route(invalid regex syntax for the second element) --- config location /t { content_by_lua_block { @@ -854,7 +823,7 @@ invalid capturing variable name found -=== TEST 30: set route(invalid uri) +=== TEST 29: set route(invalid uri) --- config location /t { content_by_lua_block { @@ -893,7 +862,7 @@ qr/failed to match pattern/ -=== TEST 31: wrong value of uri +=== TEST 30: wrong value of uri --- config location /t { content_by_lua_block { @@ -918,7 +887,7 @@ property "uri" validation failed: failed to match pattern "^\\/.*" with "home" -=== TEST 32: set route(invalid header field) +=== TEST 31: set route(invalid header field) --- config location /t { content_by_lua_block { @@ -962,7 +931,7 @@ header field: X-Api:Version -=== TEST 33: set route(invalid header value) +=== TEST 32: set route(invalid header value) --- config location /t { content_by_lua_block { @@ -1004,7 +973,7 @@ qr/invalid value character/ -=== TEST 34: set route(rewrite uri with args) +=== TEST 33: set route(rewrite uri with args) --- config location /t { content_by_lua_block { @@ -1042,7 +1011,7 @@ passed -=== TEST 35: rewrite uri with args +=== TEST 34: rewrite uri with args --- request GET /hello?a=iresty --- response_body_like eval @@ -1057,7 +1026,7 @@ q: apisix) -=== TEST 36: print the plugin `conf` in etcd, no dirty data +=== TEST 35: print the plugin `conf` in etcd, no dirty data --- config location /t { content_by_lua_block { @@ -1091,19 +1060,19 @@ q: apisix) end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request GET /t --- response_body -{"proxy-rewrite":{"headers":{"X-Api":"v2"},"uri":"/uri/plugin_proxy_rewrite"}} 
+{"proxy-rewrite":{"headers":{"X-Api":"v2"},"uri":"/uri/plugin_proxy_rewrite","use_real_request_uri_unsafe":false}} --- no_error_log [error] -=== TEST 37: set route(header contains nginx variables) +=== TEST 36: set route(header contains nginx variables) --- config location /t { content_by_lua_block { @@ -1146,7 +1115,7 @@ passed -=== TEST 38: hit route(header supports nginx variables) +=== TEST 37: hit route(header supports nginx variables) --- request GET /hello?name=Bill HTTP/1.1 --- more_headers @@ -1164,7 +1133,7 @@ x-real-ip: 127.0.0.1 -=== TEST 39: set route(nginx variable does not exist) +=== TEST 38: set route(nginx variable does not exist) --- config location /t { content_by_lua_block { @@ -1208,7 +1177,7 @@ passed -=== TEST 40: hit route(get nginx variable is nil) +=== TEST 39: hit route(get nginx variable is nil) --- request GET /hello HTTP/1.1 --- response_body @@ -1221,7 +1190,7 @@ x-real-ip: 127.0.0.1 -=== TEST 41: set route(rewrite uri based on ctx.var) +=== TEST 40: set route(rewrite uri based on ctx.var) --- config location /t { content_by_lua_block { @@ -1259,7 +1228,7 @@ passed -=== TEST 42: hit route(upstream uri: should be /hello) +=== TEST 41: hit route(upstream uri: should be /hello) --- request GET /test?new_uri=hello --- response_body @@ -1269,7 +1238,7 @@ hello world -=== TEST 43: host with port +=== TEST 42: host with port --- config location /t { content_by_lua_block { @@ -1293,7 +1262,7 @@ done -=== TEST 44: set route(rewrite host with port) +=== TEST 43: set route(rewrite host with port) --- config location /t { content_by_lua_block { @@ -1333,7 +1302,7 @@ passed -=== TEST 45: rewrite host with port +=== TEST 44: rewrite host with port --- request GET /hello --- response_body diff --git a/t/plugin/proxy-rewrite2.t b/t/plugin/proxy-rewrite2.t index fcd4011bacec..e6cbc1de0009 100644 --- a/t/plugin/proxy-rewrite2.t +++ b/t/plugin/proxy-rewrite2.t @@ -46,34 +46,7 @@ run_tests; __DATA__ -=== TEST 1: rewrite scheme but the node doesn't have 
port ---- apisix_yaml -routes: - - - id: 1 - uri: /hello - upstream_id: 1 - plugins: - proxy-rewrite: - scheme: "https" - - - id: 2 - uri: /hello_chunked - upstream_id: 1 -upstreams: - - - id: 1 - nodes: - "127.0.0.1": 1 - type: roundrobin -#END ---- error_code: 503 ---- error_log -Can't detect upstream's scheme - - - -=== TEST 2: access $upstream_uri before proxy-rewrite +=== TEST 1: access $upstream_uri before proxy-rewrite --- apisix_yaml global_rules: - @@ -107,7 +80,7 @@ scheme: http -=== TEST 3: default X-Forwarded-Proto +=== TEST 2: default X-Forwarded-Proto --- apisix_yaml routes: - @@ -128,7 +101,7 @@ X-Forwarded-Proto: http -=== TEST 4: pass X-Forwarded-Proto +=== TEST 3: pass X-Forwarded-Proto --- apisix_yaml routes: - @@ -151,7 +124,7 @@ X-Forwarded-Proto: https -=== TEST 5: customize X-Forwarded-Proto +=== TEST 4: customize X-Forwarded-Proto --- apisix_yaml routes: - @@ -178,7 +151,7 @@ X-Forwarded-Proto: https -=== TEST 6: make sure X-Forwarded-Proto hit the `core.request.header` cache +=== TEST 5: make sure X-Forwarded-Proto hit the `core.request.header` cache --- apisix_yaml routes: - @@ -211,7 +184,7 @@ localhost -=== TEST 7: pass duplicate X-Forwarded-Proto +=== TEST 6: pass duplicate X-Forwarded-Proto --- apisix_yaml routes: - diff --git a/t/plugin/proxy-rewrite3.t b/t/plugin/proxy-rewrite3.t index f98de527fa3f..88e2a9db1cf4 100644 --- a/t/plugin/proxy-rewrite3.t +++ b/t/plugin/proxy-rewrite3.t @@ -51,7 +51,6 @@ __DATA__ "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", "method": "POST", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -97,7 +96,6 @@ plugin_proxy_rewrite get method: POST "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", "method": "GET", - "scheme": "http", "host": "apisix.iresty.com" } }, @@ -138,8 +136,7 @@ plugin_proxy_rewrite get method: GET local ok, err = plugin.check_schema({ uri = '/apisix/home', method = 'GET1', - host = 'apisix.iresty.com', - scheme = 'http' + host = 'apisix.iresty.com' }) if not ok then 
ngx.say(err) @@ -167,7 +164,6 @@ done "proxy-rewrite": { "uri": "/plugin_proxy_rewrite", "method": "POST", - "scheme": "http", "host": "apisix.iresty.com", "headers":{ "x-api-version":"v1" @@ -200,3 +196,91 @@ passed GET /hello --- error_log plugin_proxy_rewrite get method: POST + + + +=== TEST 8: set route(unsafe uri not normalized at request) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: unsafe uri not normalized at request +--- request +GET /print%5Furi%5Fdetailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print%5Furi%5Fdetailed + + + +=== TEST 10: set route(safe uri not normalized at request) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: safe uri not normalized at request +--- request +GET /print_uri_detailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print_uri_detailed diff --git a/t/plugin/redirect.t b/t/plugin/redirect.t index 3b8d87afd787..b46b02c37724 100644 --- a/t/plugin/redirect.t +++ b/t/plugin/redirect.t @@ -443,12 +443,13 @@ Location: 
https://foo.com:8443/hello -=== TEST 19: redirect(port using `apisix.ssl.listen_port`) +=== TEST 19: redirect(port using `apisix.ssl.listen`) --- yaml_config apisix: ssl: enable: true - listen_port: 9445 + listen: + - port: 9445 --- request GET /hello --- more_headers @@ -649,7 +650,7 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -1114,3 +1115,38 @@ X-Forwarded-Proto: http --- error_code: 301 --- response_headers Location: https://foo.com:9443/hello + + + +=== TEST 47: wrong configure, enable http_to_https with append_query_string +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "append_query_string": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/error_msg":"failed to check the configuration of plugin redirect err: only one of `http_to_https` and `append_query_string` can be configured."/ +--- no_error_log +[error] diff --git a/t/plugin/redirect2.t b/t/plugin/redirect2.t index ac840e6d4274..24f6f8ebf3fb 100644 --- a/t/plugin/redirect2.t +++ b/t/plugin/redirect2.t @@ -79,3 +79,32 @@ GET /test/hello?o=apache --- response_headers Location: http://test.com/hello?q=apisix&o=apache --- error_code: 302 + + + +=== TEST 3: compatible with old version configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": 
true, + "append_query_string": false + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/plugin/response-rewrite.t b/t/plugin/response-rewrite.t index ebde0d0580a6..d9283e3fe7d9 100644 --- a/t/plugin/response-rewrite.t +++ b/t/plugin/response-rewrite.t @@ -471,7 +471,7 @@ invalid base64 content end local resp_data = core.json.decode(body) - ngx.say(encode_with_keys_sorted(resp_data.node.value.plugins)) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) } } --- request @@ -699,3 +699,50 @@ X-A: 127.0.0.1 X-B: from 127.0.0.1 to 127.0.0.1:1980 --- no_error_log [error] + + + +=== TEST 25: set empty body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 26: hit set empty body +--- request +GET /hello +--- response_body +--- no_error_log +[error] diff --git a/t/plugin/response-rewrite2.t b/t/plugin/response-rewrite2.t index 48401f915308..e3209314632a 100644 --- a/t/plugin/response-rewrite2.t +++ b/t/plugin/response-rewrite2.t @@ -517,3 +517,181 @@ passed GET /hello --- response_body hello world + + + +=== TEST 19: schema check for headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {add = { + {"headers:"} + }}, + {remove = { + {"headers:"} + }}, + {set = { + {"headers"} + }}, + {set = { + {[""] = 1} + }}, + {set = { + {["a"] = true} + }}, + }) do + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({headers = 
case}) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } +} +--- response_body eval +"property \"headers\" validation failed: object matches none of the required\n" x 5 + + + +=== TEST 20: add headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache", + "Cache-Control : max-age=0, must-revalidate" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit +--- request +GET /hello +--- response_headers +Cache-Control: no-cache, max-age=0, must-revalidate + + + +=== TEST 22: set headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: hit +--- request +GET /hello +--- response_headers +Cache-Control: max-age=0, must-revalidate + + + +=== TEST 24: remove headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Set-Cookie: =; Max-Age=" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + }, + "remove": [ + "Set-Cookie", + "Cache-Control" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: hit +--- request +GET /hello +--- response_headers +Cache-Control: +Set-Cookie: diff --git a/t/plugin/rocketmq-logger2.t b/t/plugin/rocketmq-logger2.t index 286d3cad4fe0..60178612ee5e 100644 --- a/t/plugin/rocketmq-logger2.t +++ b/t/plugin/rocketmq-logger2.t @@ -208,9 +208,8 @@ qr/failed to send data to rocketmq topic: .*, nameserver_list: \{"127.0.0.127":9 content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata/rocketmq-logger', - ngx.HTTP_DELETE, - nil, - [[{"action": "delete"}]]) + ngx.HTTP_DELETE + ) } } --- response_body diff --git a/t/plugin/sls-logger.t b/t/plugin/sls-logger.t index 11db664cd22b..1c36383fb3b1 100644 --- a/t/plugin/sls-logger.t +++ b/t/plugin/sls-logger.t @@ -198,3 +198,49 @@ hello world --- response_body passed --- timeout: 5 + + + +=== TEST 8: add log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/sls-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: access +--- extra_init_by_lua + local json = require("toolkit.json") + local rfc5424 = require("apisix.plugins.slslog.rfc5424") + local old_f = rfc5424.encode + rfc5424.encode = function(facility, severity, hostname, appname, pid, project, + logstore, access_key_id, access_key_secret, msg) + local r = json.decode(msg) + assert(r.client_ip == "127.0.0.1", r.client_ip) + assert(r.host == "localhost", r.host) + return old_f(facility, severity, hostname, appname, pid, project, + logstore, access_key_id, access_key_secret, msg) + end +--- request +GET /hello +--- response_body +hello world diff --git a/t/plugin/tencent-cloud-cls.t 
b/t/plugin/tencent-cloud-cls.t new file mode 100644 index 000000000000..14006bbd7e9f --- /dev/null +++ b/t/plugin/tencent-cloud-cls.t @@ -0,0 +1,330 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 10420; + location /structuredlog { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "tencent-cloud-cls body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "tencent-cloud-cls headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } + } + server { + listen 10421; + location /structuredlog { + content_by_lua_block { + ngx.exit(500) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + secret_key = "secret_key", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: cls config missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "secret_key" is required +done + + + +=== TEST 3: add plugin for incorrect server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10421", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: incorrect server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch 
Processor[tencent-cloud-cls] failed to process entries [1/1]: got wrong status: 500 +--- wait: 0.5 + + + +=== TEST 5: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 7: verify request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_to_cls = function(self, logs) + if (#logs ~= 1) then + ngx.log(ngx.ERR, "unexpected logs length: ", #logs) + return + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 8: verify cls api request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 
then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 9: plugin metadata +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/tencent-cloud-cls', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: log use log_format +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + local has_host, has_timestamp, has_client_ip = false, false, false + for i, tag in ipairs(log.contents) do + if tag.key == "host" then + has_host = true + end + if tag.key == "@timestamp" then + has_timestamp = true + end + if tag.key == "client_ip" then + has_client_ip = true + end + end + if not(has_host and has_timestamp and has_client_ip) then + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- wait: 0.5 diff --git a/t/plugin/traffic-split2.t b/t/plugin/traffic-split2.t index 41bee399d689..c41886283d56 100644 --- a/t/plugin/traffic-split2.t +++ b/t/plugin/traffic-split2.t @@ -744,7 +744,6 @@ qr/1980, 1981, 1982, 1980, 1981, 1982, 1980, 1981, 1982/ ngx.say(body) } } ---- 
skip_nginx: 5: < 1.19.0 --- response_body passed @@ -753,7 +752,6 @@ passed === TEST 19: hit route --- request GET /uri?id=1 ---- skip_nginx: 5: < 1.19.0 --- response_body eval qr/host: 127.0.0.1/ --- error_log diff --git a/t/plugin/traffic-split5.t b/t/plugin/traffic-split5.t index 5e2b80ac363e..1de76cea5d42 100644 --- a/t/plugin/traffic-split5.t +++ b/t/plugin/traffic-split5.t @@ -405,3 +405,62 @@ passed } --- response_body 1970, 1970, 1971, 1972 + + + +=== TEST 7: set up traffic-split rule +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_name", "==", "jack" } } + } }, + weighted_upstreams = { { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1979"] = 1 + }, + }, + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit and check default timeout +--- http_config +proxy_connect_timeout 12345s; +--- request +GET /server_port?name=jack +--- log_level: debug +--- error_log eval +qr/event timer add: \d+: 12345000:\d+/ +--- error_code: 502 diff --git a/t/plugin/ua-restriction.t b/t/plugin/ua-restriction.t index 82e665894655..0e8a9544bd34 100644 --- a/t/plugin/ua-restriction.t +++ b/t/plugin/ua-restriction.t @@ -725,7 +725,9 @@ hello world "denylist": [ "foo" ], - "disable": true + "_meta": { + "disable": true + } } } }]] diff --git a/t/plugin/wolf-rbac.t b/t/plugin/wolf-rbac.t index 954f9c1ca114..6e0fb0d7dd16 100644 --- a/t/plugin/wolf-rbac.t +++ b/t/plugin/wolf-rbac.t @@ -115,12 +115,12 @@ done for _, data in ipairs(data) do local code, body = t(data.url, ngx.HTTP_PUT, data.data) - 
ngx.say(code..body) + ngx.say(body) end } } --- response_body eval -"201passed\n" x 3 +"passed\n" x 3 @@ -342,17 +342,17 @@ x-rbac-token: V1#invalid-appid#rbac-token === TEST 16: verify: failed --- request GET /hello1 ---- error_code: 401 +--- error_code: 403 --- more_headers x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token --- response_body -{"message":"Invalid user permission"} +{"message":"ERR_ACCESS_DENIED"} --- grep_error_log eval -qr/no permission to access */ +qr/ERR_ACCESS_DENIED */ --- grep_error_log_out -no permission to access -no permission to access -no permission to access +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED @@ -545,3 +545,36 @@ location /t { } --- response_body_like eval qr/success to change password/ + + + +=== TEST 29: verify: failed, server internal error +--- request +GET /hello/500 +--- error_code: 500 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"request to wolf-server failed, status:500"} +--- grep_error_log eval +qr/request to wolf-server failed, status:500 */ +--- grep_error_log_out +request to wolf-server failed, status:500 +request to wolf-server failed, status:500 + + + +=== TEST 30: verify: failed, token is expired +--- request +GET /hello/401 +--- error_code: 401 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"ERR_TOKEN_INVALID"} +--- grep_error_log eval +qr/ERR_TOKEN_INVALID */ +--- grep_error_log_out +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID diff --git a/t/plugin/workflow.t b/t/plugin/workflow.t new file mode 100644 index 000000000000..e1bf77a1f26c --- /dev/null +++ b/t/plugin/workflow.t @@ -0,0 +1,689 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + status = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = "403" + } + } + } + } + } + }, + { + rules = { + { + case = { + + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "fake", + { + code = 403 + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = 
plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +property "rules" validation failed: failed to validate item 1: property "actions" is required +property "rules" validation failed: failed to validate item 1: property "actions" validation failed: failed to validate item 1: expect array to have at least 1 items +failed to validate the 'return' action: property "code" is required +failed to validate the 'return' action: property "code" validation failed: wrong type: expected integer, got string +property "rules" validation failed: failed to validate item 1: property "case" validation failed: expect array to have at least 1 items +unsupported action: fake + + + +=== TEST 2: set plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: trigger workflow +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 4: multiple conditions in one case +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"], + ["arg_foo", "==", "bar"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + 
} + } +--- response_body +passed + + + +=== TEST 5: missing match the only case +--- request +GET /hello?foo=bad + + + +=== TEST 6: trigger workflow +--- request +GET /hello?foo=bar +--- error_code: 403 +--- response_body +{"error_msg":"rejected by workflow"} + + + +=== TEST 7: multiple cases with different actions +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello2"} + }, + actions = { + { + "return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: trigger one case +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 9: trigger another case +--- request +GET /hello2 +--- error_code: 401 + + + +=== TEST 10: match case in order +# rules is an array, match in the order of the index of the array, +# when cases are matched, actions are executed and do not continue +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"arg_foo", "==", "bar"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + 
+ if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: both case 1&2 matched, trigger the first cases +--- request +GET /hello?foo=bar +--- error_code: 403 + + + +=== TEST 12: case 1 mismatched, trigger the second cases +--- request +GET /hello?foo=bad +--- error_code: 401 + + + +=== TEST 13: all cases mismatched, pass to upstream +--- request +GET /hello1 +--- response_body +hello1 world + + + +=== TEST 14: schema check(limit-count) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {time_window = 60} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + group = "services_1" + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +failed to validate the 'limit-count' action: property "time_window" is required +failed to validate the 'limit-count' action: property "count" is required +failed to validate the 'limit-count' action: group is not supported + + + +=== TEST 15: set actions as limit-count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", 
"/hello"] + ], + "actions": [ + [ + "limit-count", + { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 17: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: cross-hit case 1 and case 2, up limit by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1", +"GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200, 200, 200, 503, 503] diff --git a/t/plugin/workflow2.t b/t/plugin/workflow2.t new file mode 100644 index 000000000000..b30567532832 --- /dev/null +++ b/t/plugin/workflow2.t @@ -0,0 +1,285 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor 
license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: multiple cases with different actions(return & limit-count) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + 
json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello1"] +--- error_code eval +[403, 200, 503] + + + +=== TEST 3: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: cross-hit case 1 and case 2, up limit by isolation 2 +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: different actions with different limit count conf, up limit by isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + 
{ + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: case 1 up limit, case 2 psssed +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 200] diff --git a/t/plugin/zipkin2.t b/t/plugin/zipkin2.t index 3175075d32ed..8423f6f67d63 100644 --- a/t/plugin/zipkin2.t +++ b/t/plugin/zipkin2.t @@ -98,6 +98,7 @@ passed b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 --- response_headers x-b3-sampled: 1 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 --- raw_response_headers_unlike b3: --- error_log @@ -124,6 +125,9 @@ invalid b3 header b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-0-05e3ac9a4f6e3b90 --- response_headers x-b3-sampled: 0 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 +x-b3-parentspanid: 05e3ac9a4f6e3b90 +x-b3-spanid: e457b5a2e4d86bd1 @@ -132,6 +136,9 @@ x-b3-sampled: 0 b3: 0 --- response_headers x-b3-sampled: 0 +x-b3-traceid: +x-b3-parentspanid: +x-b3-spanid: diff --git a/t/router/multi-ssl-certs.t b/t/router/multi-ssl-certs.t index 1c302f9ed134..1bfb4d36bef9 100644 --- a/t/router/multi-ssl-certs.t +++ b/t/router/multi-ssl-certs.t @@ -36,17 +36,14 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, 
core.json.encode(data), [[{ - "node": { - "value": { - "sni": "www.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "www.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] ) @@ -183,17 +180,14 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/2', + local code, body = t.test('/apisix/admin/ssls/2', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/2" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/2" }]] ) @@ -268,17 +262,14 @@ location /t { local ssl_key = t.read_file("t/certs/apisix_admin_ssl.key") local data = {cert = ssl_cert, key = ssl_key, sni = "apisix.dev"} - local code, body = t.test('/apisix/admin/ssl/3', + local code, body = t.test('/apisix/admin/ssls/3', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "apisix.dev" - }, - "key": "/apisix/ssl/3" + "value": { + "sni": "apisix.dev" }, - "action": "set" + "key": "/apisix/ssls/3" }]] ) @@ -349,9 +340,9 @@ location /t { local core = require("apisix.core") local t = require("lib.test_admin") - t.test('/apisix/admin/ssl/1', ngx.HTTP_DELETE) - t.test('/apisix/admin/ssl/2', ngx.HTTP_DELETE) - t.test('/apisix/admin/ssl/3', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/2', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/3', ngx.HTTP_DELETE) } } diff --git a/t/router/radixtree-sni.t b/t/router/radixtree-sni.t index b8494d315b03..c1b781473f41 100644 --- a/t/router/radixtree-sni.t +++ b/t/router/radixtree-sni.t @@ -36,19 +36,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), 
[[{ - "node": { - "value": { - "sni": "www.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "www.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -79,7 +76,7 @@ passed }, "uri": "/hello" }]] - ) + ) if code >= 300 then ngx.status = code @@ -223,19 +220,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -337,19 +331,16 @@ location /t { local ssl_key = t.read_file("t/certs/apisix.key") local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -451,19 +442,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -579,19 +567,16 @@ location /t { local data = {status = 0} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, 
core.json.encode(data), [[{ - "node": { - "value": { - "status": 0 - }, - "key": "/apisix/ssl/1" + "value": { + "status": 0 }, - "action": "compareAndSwap" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -660,19 +645,16 @@ location /t { local data = {status = 1} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PATCH, core.json.encode(data), [[{ - "node": { - "value": { - "status": 1 - }, - "key": "/apisix/ssl/1" + "value": { + "status": 1 }, - "action": "compareAndSwap" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -744,19 +726,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "snis": ["test2.com", "*.test2.com"] - }, - "key": "/apisix/ssl/1" + "value": { + "snis": ["test2.com", "*.test2.com"] }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -874,7 +853,7 @@ location /t { local ssl_key = t.aes_encrypt(t.read_file("t/certs/test2.key")) local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/router/radixtree-sni2.t b/t/router/radixtree-sni2.t index 83d1187dc0b5..b0940a1a1646 100644 --- a/t/router/radixtree-sni2.t +++ b/t/router/radixtree-sni2.t @@ -45,17 +45,14 @@ location /t { sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "test.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "test.com" }, - "action": 
"set" + "key": "/apisix/ssls/1" }]] ) ngx.status = code @@ -167,19 +164,16 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data), [[{ - "node": { - "value": { - "sni": "*.test2.com" - }, - "key": "/apisix/ssl/1" + "value": { + "sni": "*.test2.com" }, - "action": "set" + "key": "/apisix/ssls/1" }]] - ) + ) ngx.status = code ngx.say(body) @@ -270,10 +264,10 @@ location /t { key = raw_ssl_key, } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) - ) + ) ngx.status = code ngx.print(body) @@ -298,7 +292,7 @@ location /t { local ssl_key = t.read_file("t/certs/incorrect.key") local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -412,7 +406,7 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, sni = "*.TesT2.com"} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) @@ -477,7 +471,7 @@ location /t { local ssl_key = t.read_file("t/certs/test2.key") local data = {cert = ssl_cert, key = ssl_key, snis = {"TesT2.com", "a.com"}} - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/router/radixtree-uri-with-parameter.t b/t/router/radixtree-uri-with-parameter.t index d8fa0950c235..f591fab30b69 100644 --- a/t/router/radixtree-uri-with-parameter.t +++ b/t/router/radixtree-uri-with-parameter.t @@ -59,21 +59,18 @@ __DATA__ "uri": "/name/:name/bar" }]], [[{ - "node": { - 
"value": { - "uri": "/name/:name/bar", - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/name/:name/bar", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) if code >= 300 then ngx.status = code @@ -162,21 +159,18 @@ qr/404 Not Found/ "uri": "/:name/foo" }]], [[{ - "node": { - "value": { - "uri": "/:name/foo", - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - } - }, - "key": "/apisix/routes/1" + "value": { + "uri": "/:name/foo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } }, - "action": "set" + "key": "/apisix/routes/1" }]] - ) + ) if code >= 300 then ngx.status = code @@ -221,11 +215,11 @@ GET /json/bbb/foo content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/services/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "hosts": ["bar.com"] }]] - ) + ) if code >= 300 then ngx.status = code @@ -234,8 +228,8 @@ GET /json/bbb/foo end local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "methods": ["GET"], "upstream": { "nodes": { @@ -249,7 +243,7 @@ GET /json/bbb/foo "service_id": "1", "uri": "/:name/hello" }]] - ) + ) if code >= 300 then ngx.status = code @@ -258,8 +252,8 @@ GET /json/bbb/foo end local code, body = t('/apisix/admin/routes/2', - ngx.HTTP_PUT, - [[{ + ngx.HTTP_PUT, + [[{ "methods": ["GET"], "upstream": { "nodes": { @@ -273,7 +267,7 @@ GET /json/bbb/foo "uri": "/:name/hello", "priority": -1 }]] - ) + ) if code >= 300 then ngx.status = code diff --git a/t/stream-node/mtls.t b/t/stream-node/mtls.t index 3caad2c1b7ee..35dcfc4fd626 100644 --- a/t/stream-node/mtls.t +++ b/t/stream-node/mtls.t @@ -100,7 +100,7 @@ __DATA__ depth = 2, } } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = 
t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, json.encode(data) ) diff --git a/t/stream-node/sanity.t b/t/stream-node/sanity.t index f42b89ca9832..06e809dfd25a 100644 --- a/t/stream-node/sanity.t +++ b/t/stream-node/sanity.t @@ -373,7 +373,6 @@ GET /t passed --- no_error_log [error] ---- skip_nginx: 5: < 1.19.0 @@ -384,7 +383,6 @@ mmm hello world --- no_error_log [error] ---- skip_nginx: 5: < 1.19.0 diff --git a/t/stream-node/sni.t b/t/stream-node/sni.t index 0d71313640bb..29181527902c 100644 --- a/t/stream-node/sni.t +++ b/t/stream-node/sni.t @@ -46,7 +46,7 @@ __DATA__ cert = ssl_cert, key = ssl_key, sni = "*.test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-node/tls.t b/t/stream-node/tls.t index 2f3016b55830..7e9568c4d94a 100644 --- a/t/stream-node/tls.t +++ b/t/stream-node/tls.t @@ -46,7 +46,7 @@ __DATA__ cert = ssl_cert, key = ssl_key, sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-node/upstream-tls.t b/t/stream-node/upstream-tls.t index a9fce58a4c53..e721d4c354b7 100644 --- a/t/stream-node/upstream-tls.t +++ b/t/stream-node/upstream-tls.t @@ -118,7 +118,7 @@ hello apisix_backend cert = ssl_cert, key = ssl_key, sni = "test.com", } - local code, body = t.test('/apisix/admin/ssl/1', + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_PUT, core.json.encode(data) ) diff --git a/t/stream-plugin/mqtt-proxy.t b/t/stream-plugin/mqtt-proxy.t index 4a59e376d362..69403f380f85 100644 --- a/t/stream-plugin/mqtt-proxy.t +++ b/t/stream-plugin/mqtt-proxy.t @@ -39,12 +39,19 @@ __DATA__ "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { "host": "127.0.0.1", - 
"port": 1995 + "port": 1995, + "weight": 1 } - } + ] } }]] ) @@ -132,7 +139,7 @@ match(): not hit any route -=== TEST 6: check schema +=== TEST 6: set route with host --- config location /t { content_by_lua_block { @@ -145,51 +152,22 @@ match(): not hit any route "plugins": { "mqtt-proxy": { "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "host": "127.0.0.1" - } + "protocol_level": 4 } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.print(body) - } - } ---- request -GET /t ---- error_code: 400 ---- response_body -{"error_msg":"failed to check the configuration of stream plugin [mqtt-proxy]: property \"upstream\" validation failed: value should match only one schema, but matches none"} - - - -=== TEST 7: set route with host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/stream_routes/1', - ngx.HTTP_PUT, - [[{ - "remote_addr": "127.0.0.1", - "server_port": 1985, - "plugins": { - "mqtt-proxy": { - "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { "host": "localhost", - "port": 1995 + "port": 1995, + "weight": 1 } - } + ] } }]] - ) + ) if code >= 300 then ngx.status = code @@ -206,7 +184,7 @@ passed -=== TEST 8: hit route +=== TEST 7: hit route --- stream_request eval "\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" --- stream_response @@ -216,54 +194,7 @@ hello world -=== TEST 9: set route with invalid host ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/stream_routes/1', - ngx.HTTP_PUT, - [[{ - "remote_addr": "127.0.0.1", - "server_port": 1985, - "plugins": { - "mqtt-proxy": { - "protocol_name": "MQTT", - "protocol_level": 4, - "upstream": { - "host": "loc", - "port": 1995 - } - } - } - }]] - ) - - if code >= 300 then - ngx.status = code - 
end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed ---- no_error_log -[error] - - - -=== TEST 10: hit route ---- stream_request eval -"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" ---- error_log -failed to parse domain: loc, error: ---- timeout: 10 - - - -=== TEST 11: set route with upstream +=== TEST 8: set route with upstream --- config location /t { content_by_lua_block { @@ -305,7 +236,7 @@ passed -=== TEST 12: hit route +=== TEST 9: hit route --- stream_request eval "\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" --- stream_response @@ -319,7 +250,7 @@ mqtt client id: foo -=== TEST 13: hit route with empty client id +=== TEST 10: hit route with empty client id --- stream_request eval "\x10\x0c\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x00" --- stream_response @@ -332,7 +263,7 @@ qr/mqtt client id: \w+/ -=== TEST 14: MQTT 5 +=== TEST 11: MQTT 5 --- config location /t { content_by_lua_block { @@ -374,7 +305,7 @@ passed -=== TEST 15: hit route with empty property +=== TEST 12: hit route with empty property --- stream_request eval "\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" --- stream_response @@ -387,7 +318,7 @@ qr/mqtt client id: \w+/ -=== TEST 16: hit route with property +=== TEST 13: hit route with property --- stream_request eval "\x10\x1b\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x05\x11\x00\x00\x0e\x10\x00\x09\x63\x6c\x69\x6e\x74\x2d\x31\x31\x31" --- stream_response @@ -401,7 +332,7 @@ mqtt client id: clint-111 -=== TEST 17: balance with mqtt_client_id +=== TEST 14: balance with mqtt_client_id --- config location /t { content_by_lua_block { @@ -451,7 +382,7 @@ passed -=== TEST 18: hit route with empty id +=== TEST 15: hit route with empty id --- stream_request eval "\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" --- stream_response @@ -465,7 +396,7 @@ proxy request to 127.0.0.1:1995 -=== TEST 19: hit route with different client id, part 1 +=== TEST 16: 
hit route with different client id, part 1 --- stream_request eval "\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x66" --- stream_response @@ -480,7 +411,7 @@ proxy request to 0.0.0.0:1995 -=== TEST 20: hit route with different client id, part 2 +=== TEST 17: hit route with different client id, part 2 --- stream_request eval "\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x67" --- stream_response diff --git a/t/stream-plugin/mqtt-proxy2.t b/t/stream-plugin/mqtt-proxy2.t new file mode 100644 index 000000000000..e387b26dce1a --- /dev/null +++ b/t/stream-plugin/mqtt-proxy2.t @@ -0,0 +1,79 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set route with invalid host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "loc", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 2: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- error_log +failed to parse domain: loc, error: +--- timeout: 10 diff --git a/utils/create-ssl.py b/utils/create-ssl.py index 93f206819c97..e8a3daa33b73 100755 --- a/utils/create-ssl.py +++ b/utils/create-ssl.py @@ -30,7 +30,7 @@ key = f.read() sni = sys.argv[3] api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9080/apisix/admin/ssl/1", json={ +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ "cert": cert, "key": key, "snis": [sni], diff --git a/utils/gen-vote-contents.sh b/utils/gen-vote-contents.sh index 87ddedbc18d8..d644dfd6bedf 100755 --- a/utils/gen-vote-contents.sh +++ b/utils/gen-vote-contents.sh @@ -73,7 +73,7 @@ tar zxvf apache-apisix-$VERSION-src.tgz 4. 
Build Apache APISIX: -https://github.com/apache/apisix/blob/release/$BLOB_VERSION/docs/en/latest/installation-guide.md#installation-via-source-release-package +https://github.com/apache/apisix/blob/release/$BLOB_VERSION/docs/en/latest/building-apisix.md#building-apisix-from-source The vote will be open for at least 72 hours or until necessary number of votes are reached. diff --git a/utils/linux-install-etcd-client.sh b/utils/linux-install-etcd-client.sh index ea323aea41f2..f760b6f1777f 100755 --- a/utils/linux-install-etcd-client.sh +++ b/utils/linux-install-etcd-client.sh @@ -18,14 +18,14 @@ # ETCD_ARCH="amd64" -ETCD_VERSION=${ETCD_VERSION:-'3.4.18'} +ETCD_VERSION=${ETCD_VERSION:-'3.5.4'} ARCH=${ARCH:-`(uname -m | tr '[:upper:]' '[:lower:]')`} if [[ $ARCH == "arm64" ]] || [[ $ARCH == "aarch64" ]]; then ETCD_ARCH="arm64" fi -wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v3.4.18-linux-${ETCD_ARCH}.tar.gz +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz tar xf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz sudo cp etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}/etcdctl /usr/local/bin/ rm -rf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH} diff --git a/utils/linux-install-openresty.sh b/utils/linux-install-openresty.sh index 7498da3ab381..c97454e3e26e 100755 --- a/utils/linux-install-openresty.sh +++ b/utils/linux-install-openresty.sh @@ -48,4 +48,4 @@ else openresty="openresty-debug=$OPENRESTY_VERSION*" fi -sudo apt-get install "$openresty" lua5.1 liblua5.1-0-dev openresty-openssl111-debug-dev libldap2-dev +sudo apt-get install "$openresty" openresty-openssl111-debug-dev libldap2-dev