fix(interactive): Implement HttpIrMetaReader to Get Meta Data From Remote Http Service #5753

Workflow file for this run

name: GraphScope Store CI
on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  workflow_dispatch:
  push:
    branches:
      - main
    paths-ignore:
      - 'CONTRIBUTORS'
      - 'LICENSE'
      - 'NOTICE.txt'
      - '**.md'
      - '**.rst'
      - 'docs/**'
      - 'demo/**'
      - 'scripts/**'
      - 'tutorials/**'
  pull_request:
    branches:
      - main
    paths:
      - 'proto/**'
      - 'interactive_engine/**'
      - 'python/graphscope/client/**'
      - 'charts/graphscope-store/**'
      - '.github/workflows/gss.yml'
      - '!interactive_engine/**.md'
      - '!charts/graphscope-store/**.md'
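# Deduplicate concurrent runs: the group key falls back from the pull request
# number to the branch ref to the commit SHA, and a newer run for the same key
# cancels the one still in progress.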
concurrency:
  group: ${{ github.repository }}-${{ github.event.number || github.head_ref || github.sha }}-${{ github.workflow }}
  cancel-in-progress: true
env:
  GSS_IMAGE: registry.cn-hongkong.aliyuncs.com/graphscope/graphscope-store
jobs:
  gremlin-test:
    # Requires that the host can run docker without sudo and
    # can `ssh localhost` without a password, both of which may need to
    # be configured manually when a new self-hosted runner is added.
    runs-on: [self-hosted, manylinux2014]
    if: ${{ github.repository == 'alibaba/GraphScope' }}
    steps:
      - uses: actions/checkout@v3
      - name: Detect the tmate session
        run: |
          if grep -v "grep" .github/workflows/gss.yml | grep "action-tmate"; then
            echo 'WARNING!!! The self-hosted machine cannot run a tmate session, please debug it manually'
            exit 1
          fi
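      # In the check above, `grep -v "grep"` drops this script's own grep line
      # from the scan, so the workflow only fails when a real leftover
      # action-tmate step is present in gss.yml.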
      - uses: actions/cache@v3
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            ~/.cache/sccache
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
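      # sccache, enabled via RUSTC_WRAPPER in the next step, serves repeated
      # rustc invocations from ~/.cache/sccache (restored by the cargo cache
      # above); `sccache --show-stats` reports the hit rate at the end.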
      - name: Build GraphScope Store
        run: |
          . ${HOME}/.graphscope_env
          export SCCACHE_DIR=~/.cache/sccache
          export RUSTC_WRAPPER=/usr/local/bin/sccache
          cd ${GITHUB_WORKSPACE}/interactive_engine
          mvn clean install -P groot -Drust.compile.mode=debug -DskipTests --quiet
          mvn clean install -Pgroot-data-load --quiet
          sccache --show-stats
      - name: Gremlin Test
        run: |
          . ${HOME}/.graphscope_env
          cd interactive_engine/groot-server
          mvn test -P gremlin-test
      - name: Upload tools for helm test to Artifact
        uses: actions/upload-artifact@v3
        with:
          name: groot
          path: |
            interactive_engine/assembly/target/groot.tar.gz
            interactive_engine/data-load-tool/target/data-load-tool-0.0.1-SNAPSHOT.jar
          retention-days: 5
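  # The helm-test job below downloads these two artifacts and uses them when
  # building the graphscope-store image that it deploys onto minikube.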
  helm-test:
    runs-on: [self-hosted, ubuntu2004]
    if: ${{ github.repository == 'alibaba/GraphScope' }}
    needs: [gremlin-test]
    env:
      JAVA_HOME: /usr/lib/jvm/default-java
      GS_TEST_DIR: ${{ github.workspace }}/gstest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: true
      - name: Detect the tmate session
        run: |
          if grep -v "grep" .github/workflows/gss.yml | grep "action-tmate"; then
            echo 'WARNING!!! The self-hosted machine cannot run a tmate session, please debug it manually'
            exit 1
          fi
      - uses: actions/download-artifact@v3
        with:
          name: groot
          path: artifacts
      - name: Set GITHUB_ENV
        run: |
          short_sha=$(git rev-parse --short HEAD)
          echo "SHORT_SHA=${short_sha}" >> $GITHUB_ENV
      - name: Prepare Image
        run: |
          ls -la artifacts/*/*
          mv artifacts/assembly/target/groot.tar.gz artifacts/
          mv artifacts/data-load-tool/target/data-load-tool-0.0.1-SNAPSHOT.jar artifacts/
          docker build -t ${{ env.GSS_IMAGE }}:${SHORT_SHA} \
            -f .github/workflows/docker/graphscope-store.Dockerfile .
      - name: Setup SSH
        run: |
          ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ''
          cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
          chmod og-wx ~/.ssh/authorized_keys
          echo "StrictHostKeyChecking no" >> ~/.ssh/config
          sudo /etc/init.d/ssh start
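      # Passwordless ssh to localhost is most likely needed by the Hadoop helper
      # scripts used later in this job, since Hadoop's start scripts ssh into
      # each node of the (here single-node) deployment.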
      - name: Create the kubernetes cluster
        run: |
          # download gstest
          git clone -b master --single-branch --depth=1 https://github.com/GraphScope/gstest.git ${GS_TEST_DIR}
          minikube start --base-image='registry-vpc.cn-hongkong.aliyuncs.com/graphscope/kicbase:v0.0.30' \
            --cpus='12' --memory='32000mb' --disk-size='40000mb'
          minikube image load ${{ env.GSS_IMAGE }}:${SHORT_SHA}
      - uses: dashanji/kubernetes-log-export-action@v5
        env:
          SHOW_TIMESTAMPS: 'true'
          OUTPUT_DIR: ${{ github.workspace }}/helm-installation-logs
          NAMESPACES: "gs*,default"
          MODE: start
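      # MODE: start begins capturing pod logs from the namespaces matching
      # NAMESPACES; the paired MODE: stop step near the end of this job flushes
      # them to OUTPUT_DIR so they can be uploaded on failure.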
      - name: Install graphscope-store with helm
        run: |
          # update helm dependency
          cd ${GITHUB_WORKSPACE}/charts/graphscope-store
          helm dependency update
          # helm deployment and testing
          cd ${GITHUB_WORKSPACE}/charts
          helm install ci --set image.tag=${SHORT_SHA},distributed.enabled=true,store.replicaCount=2 ./graphscope-store
          helm test ci --timeout 5m0s
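      # distributed.enabled=true together with store.replicaCount=2 deploys groot
      # in distributed mode with two store pods; `helm test ci` runs the test
      # hooks bundled with the chart against the fresh release.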
      - name: Test the helm deployment
        run: |
          # 1. get gss service endpoint
          export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
          export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
          export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
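          # judging from the variable names, ports[0] is the gRPC port and
          # ports[1] the Gremlin port of the frontend service; NODE_IP is the
          # address of the single minikube node, so clients connect via NodePorts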
          # 2. deploy hadoop hdfs
          tar -zxf /home/runner/hadoop-2.10.2.tar.gz -C /tmp/
          cd ${GITHUB_WORKSPACE}/.github/workflows/hadoop_scripts
          ./prepare_hadoop.sh /tmp/hadoop-2.10.2
          export PATH=${PATH}:/tmp/hadoop-2.10.2/bin
          # data-load-tool is needed for offline_load.sh
          # see .github/workflows/hadoop_scripts/offline_load.sh
          export LOADER_DIR=${GITHUB_WORKSPACE}/artifacts
          export LOAD_DATA_SCRIPT=${GITHUB_WORKSPACE}/.github/workflows/hadoop_scripts/offline_load.sh
          sed s/GRAPH_ENDPOINT/$NODE_IP:$GRPC_PORT/ databuild.config.template > databuild.config
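          # the sed above replaces the literal GRAPH_ENDPOINT placeholder in
          # databuild.config.template with the frontend gRPC address, e.g.
          # 192.168.49.2:30305 (illustrative values; they differ per run)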
          # 3. upload data to HDFS
          hadoop fs -mkdir /ldbc_sample || true
          hadoop fs -put ${GS_TEST_DIR}/ldbc_sample/person_0_0.csv /ldbc_sample/person_0_0.csv
          hadoop fs -put ${GS_TEST_DIR}/ldbc_sample/person_knows_person_0_0.csv /ldbc_sample/person_knows_person_0_0.csv
          # python test
          cd ${GITHUB_WORKSPACE}/python
          python3 -m pip install -r ./requirements.txt --user
          python3 -m pip install -r ./requirements-dev.txt --user
          python3 -m pip install pytest-cov --user
          python3 setup.py build_proto
          python3 -m pytest -s -vvv graphscope/tests/kubernetes/test_store_service.py -k test_demo_fresh
      - name: Restart helm and run demo with the PersistentVolume
        run: |
          # uninstall the release, wait for the pods to terminate, then
          # reinstall so the new release reuses the existing PersistentVolumes
          helm uninstall ci
          sleep 30
          cd ${GITHUB_WORKSPACE}/charts
          helm install ci --set image.tag=${SHORT_SHA},distributed.enabled=true,store.replicaCount=2 ./graphscope-store
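      # The PersistentVolumes provisioned by the first release survive the
      # uninstall, so the restarted store should come back with the data loaded
      # by test_demo_fresh; the two steps below verify that.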
      - name: Helm Test with Helm Deployment and PersistentVolume
        run: |
          # helm test and python test on the restarted store
          export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
          export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
          export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
          helm test ci --timeout 10m0s
      - name: Python Test with Helm Deployment and PersistentVolume
        run: |
          export GRPC_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services ci-graphscope-store-frontend)
          export GREMLIN_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[1].nodePort}" services ci-graphscope-store-frontend)
          export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
          cd ${GITHUB_WORKSPACE}/python
          python3 -m pytest -s -vvv graphscope/tests/kubernetes/test_store_service.py -k test_demo_after_restart
      - uses: dashanji/kubernetes-log-export-action@v5
        env:
          OUTPUT_DIR: ${{ github.workspace }}/helm-installation-logs
          MODE: stop
      - name: Upload the k8s logs to artifact
        if: ${{ failure() }}
        uses: actions/upload-artifact@v3
        continue-on-error: true
        with:
          name: k8s-test-logs
          path: ${{ github.workspace }}/helm-installation-logs