diff --git a/README-ZH.md b/README-ZH.md
index 15c13c1e39..1b76faaa4e 100644
--- a/README-ZH.md
+++ b/README-ZH.md
@@ -7,13 +7,15 @@
 ## 引言
 
-DataSphere Studio(简称DSS)是微众银行大数据平台——WeDataSphere,自研的一站式数据应用开发管理门户。
+DataSphere Studio(简称DSS)是微众银行自研的一站式数据应用开发管理门户。
 
-基于 [**Linkis**](https://github.com/WeBankFinTech/Linkis) 计算中间件构建,可轻松整合上层各数据应用系统,让数据应用开发变得简洁又易用。
+基于插拔式的集成框架设计,及计算中间件 [**Linkis**](https://github.com/WeBankFinTech/Linkis),可轻松接入上层各种数据应用系统,让数据开发变得简洁又易用。
 
-DataSphere Studio定位为数据应用开发门户,闭环涵盖数据应用开发全流程。在统一的UI下,以工作流式的图形化拖拽开发体验,满足从数据导入、脱敏清洗、分析挖掘、质量检测、可视化展现、定时调度到数据输出应用等,数据应用开发全流程场景需求。
+在统一的UI下,DataSphere Studio以工作流式的图形化拖拽开发体验,将满足从数据交换、脱敏清洗、分析挖掘、质量检测、可视化展现、定时调度到数据输出应用等,数据应用开发全流程场景需求。
 
-借助于Linkis计算中间件的连接、复用与简化能力,DSS天生便具备了金融级高并发、高可用、多租户隔离和资源管控等执行与调度能力。
+**DSS通过插拔式的集成框架设计,让用户可以根据需要,简单快速替换DSS已集成的各种功能组件,或新增功能组件。**
+
+借助于 [**Linkis**](https://github.com/WeBankFinTech/Linkis) 计算中间件的连接、复用与简化能力,DSS天生便具备了金融级高并发、高可用、多租户隔离和资源管控等执行与调度能力。
 
 ## 界面预览
 
@@ -37,10 +39,14 @@ DSS主要特点:
 
        4、工作流调度工具——[Azkaban](https://azkaban.github.io/)
 
+       **DSS插拔式的框架设计模式,允许用户快速替换DSS已集成的各个Web系统**。如:将Scriptis替换成Zeppelin,将Azkaban替换成DolphinScheduler。
+
 ![DSS一站式](images/zh_CN/readme/onestop.gif)
 
 ### 二、基于Linkis计算中间件,打造独有的AppJoint设计理念
 
+       AppJoint,是DSS可以简单快速集成各种上层Web系统的核心概念。
+
        AppJoint——应用关节,定义了一套统一的前后台接入规范,可让外部数据应用系统快速简单地接入,成为DSS数据应用开发中的一环。
 
        DSS通过串联多个AppJoint,编排成一条支持实时执行和定时调度的工作流,用户只需简单拖拽即可完成数据应用的全流程开发。
 
@@ -53,6 +59,10 @@ DSS主要特点:
 
 ### 四、已集成的数据应用组件
 
+       DSS通过实现多个AppJoint,已集成了丰富多样的各种上层数据应用系统,基本可满足用户的数据开发需求。
+
+       **用户如果有需要,也可以轻松集成新的数据应用系统,以替换或丰富DSS的数据应用开发流程。**
+
        1、DSS的调度能力——Azkaban AppJoint
 
           用户的很多数据应用,通常希望具备周期性的调度能力。
 
@@ -113,6 +123,10 @@ DSS主要特点:
 
           空节点、子工作流节点。
 
+       8、**节点扩展**
+
+           **根据需要,用户可以简单快速替换DSS已集成的各种功能组件,或新增功能组件。**
+
 ## 与类似系统对比
 
@@ -140,13 +154,35 @@
 
 ## 文档列表
 
+#### 1. 安装编译文档
+
+[快速安装使用文档](docs/zh_CN/ch2/DSS快速安装使用文档.md)
+
+[**DSS安装常见问题列表**](docs/zh_CN/ch1/DSS安装常见问题列表.md)
+
 [DSS编译文档](docs/zh_CN/ch1/DSS编译文档.md)
 
+#### 2. 使用文档
+
+[快速使用文档](docs/zh_CN/ch3/DataSphere_Studio_QuickStart.md)
+
 [用户手册](docs/zh_CN/ch3/DSS_User_Manual.md)
 
-[外部系统快速接入DSS](docs/zh_CN/ch4/第三方系统接入DSS指南.md)
+#### 3. AppJoint插件安装文档
+
+**以下为手动安装相关插件的指南,DSS一键安装【标准版】已自动安装了以下插件,可忽略。**
+
+[DSS的Azkaban AppJoint插件安装指南](docs/zh_CN/ch4/如何接入调度系统Azkaban.md)
+
+[DSS的Qualitis AppJoint插件安装指南](https://github.com/WeBankFinTech/Qualitis/blob/master/docs/zh_CN/ch1/%E6%8E%A5%E5%85%A5%E5%B7%A5%E4%BD%9C%E6%B5%81%E6%8C%87%E5%8D%97.md)
+
+#### 4. 第三方系统如何接入文档
+
+[DSS如何快速集成第三方系统](docs/zh_CN/ch4/第三方系统接入DSS指南.md)
+
+#### 5. 架构文档
 
-[如何接入调度系统Azkaban](docs/zh_CN/ch4/如何接入调度系统Azkaban.md)
+[DSS工程发布到调度系统的架构设计](docs/zh_CN/ch4/DSS工程发布调度系统架构设计.md)
 
 更多文档,敬请期待!
diff --git a/bin/checkEnv.sh b/bin/checkEnv.sh
new file mode 100644
index 0000000000..dffcb77189
--- /dev/null
+++ b/bin/checkEnv.sh
@@ -0,0 +1,46 @@
+#
+# Copyright 2019 WeBank
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#!/bin/sh
+# checkEnv.sh: verifies that the commands install.sh depends on are available before installation starts
+say() {
+    printf 'command check failed:\n %s\n' "$1"
+}
+
+err() {
+    say "$1" >&2
+    exit 1
+}
+
+check_cmd() {
+    command -v "$1" > /dev/null 2>&1
+}
+
+need_cmd() {
+    if ! check_cmd "$1"; then
+        err "need '$1' (command not found, please install it first)"
+    fi
+}
+echo "<-----start to check linux cmd:yum java mysql unzip expect telnet sed tar source hostname---->"
+need_cmd yum
+need_cmd java
+need_cmd mysql
+need_cmd unzip
+need_cmd expect
+need_cmd telnet
+need_cmd sed
+need_cmd tar
+# source is a shell builtin; command -v only reports it when this script runs under a shell such as bash
+need_cmd source
+need_cmd hostname
+echo "<-----end to check linux cmd:yum java mysql unzip expect telnet sed tar source hostname------>"
diff --git a/bin/install.sh b/bin/install.sh
index 825448a859..70adc5a341 100644
--- a/bin/install.sh
+++ b/bin/install.sh
@@ -1,3 +1,18 @@
+#
+# Copyright 2019 WeBank
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 #!/bin/sh
 #Actively load user env
 source ~/.bash_profile
@@ -38,6 +53,11 @@ else
 fi
 }
 
+
+#check env
+sh ${workDir}/bin/checkEnv.sh
+isSuccess "check env"
+
 function checkJava(){
     java -version
     isSuccess "execute java --version"
@@ -158,38 +178,55 @@ fi
 ##init db
 if [[ '2' = "$MYSQL_INSTALL_MODE" ]];then
     mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -D$MYSQL_DB --default-character-set=utf8 -e "source ${workDir}/db/dss_ddl.sql"
-    isSuccess "source linkis_ddl.sql"
-    LOCAL_IP="`ifconfig | grep 'inet' | grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $2}'`"
+    isSuccess "source dss_ddl.sql"
+    LOCAL_IP="`hostname -i`"
     if [ $GATEWAY_INSTALL_IP == "127.0.0.1" ];then
       echo "GATEWAY_INSTALL_IP is equals 127.0.0.1 ,we will change it to ip address"
       GATEWAY_INSTALL_IP_2=$LOCAL_IP
     else
       GATEWAY_INSTALL_IP_2=$GATEWAY_INSTALL_IP
     fi
-    echo $GATEWAY_INSTALL_IP_2
+    #echo $GATEWAY_INSTALL_IP_2
     sed -i "s/GATEWAY_INSTALL_IP_2/$GATEWAY_INSTALL_IP_2/g" ${workDir}/db/dss_dml.sql
     sed -i "s/GATEWAY_PORT/$GATEWAY_PORT/g" ${workDir}/db/dss_dml.sql
-    if [ $AZKABAN_ADRESS_IP == "127.0.0.1" ];then
-      echo "AZKABAN_ADRESS_IP is equals 127.0.0.1 ,we will change it to ip address"
-      AZKABAN_ADRESS_IP_2=$LOCAL_IP
-    else
-      AZKABAN_ADRESS_IP_2=$AZKABAN_ADRESS_IP
-    fi
-    echo $AZKABAN_ADRESS_IP_2
-    sed -i "s/AZKABAN_ADRESS_IP_2/$AZKABAN_ADRESS_IP_2/g" ${workDir}/db/dss_dml.sql
-    sed -i "s/AZKABAN_ADRESS_PORT/$AZKABAN_ADRESS_PORT/g" ${workDir}/db/dss_dml.sql
     if [ $VISUALIS_NGINX_IP == "127.0.0.1" ]||[ $VISUALIS_NGINX_IP == "0.0.0.0" ];then
      echo "VISUALIS_NGINX_IP is equals $VISUALIS_NGINX_IP ,we will change it to ip address"
      VISUALIS_NGINX_IP_2=$LOCAL_IP
    else
      VISUALIS_NGINX_IP_2=$VISUALIS_NGINX_IP
    fi
-   echo $VISUALIS_NGINX_IP_2
+   #echo $VISUALIS_NGINX_IP_2
    sed -i "s/VISUALIS_NGINX_IP_2/$VISUALIS_NGINX_IP_2/g" ${workDir}/db/dss_dml.sql
    sed -i "s/VISUALIS_NGINX_PORT/$VISUALIS_NGINX_PORT/g" ${workDir}/db/dss_dml.sql
    mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -D$MYSQL_DB --default-character-set=utf8 -e "source ${workDir}/db/dss_dml.sql"
-   isSuccess "source linkis_dml.sql"
-   echo "Rebuild the table"
+   isSuccess "source dss_dml.sql"
+   if [[ '2' = "$INSTALL_MODE" ]];then
+       echo "start to update azkaban and qualitis table info"
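+       # The #azkaban and #qualitis blocks below repeat the dss_dml.sql pattern used above:
+       # resolve 127.0.0.1 to the real host IP, sed the *_ADRESS_IP_2 / *_ADRESS_PORT
+       # placeholders into the component's SQL template, then source it into the DSS database.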
+       #azkaban
+       if [ $AZKABAN_ADRESS_IP == "127.0.0.1" ];then
+           echo "AZKABAN_ADRESS_IP equals 127.0.0.1, changing it to the local ip address"
+           AZKABAN_ADRESS_IP_2=$LOCAL_IP
+       else
+           AZKABAN_ADRESS_IP_2=$AZKABAN_ADRESS_IP
+       fi
+       echo $AZKABAN_ADRESS_IP_2
+       sed -i "s/AZKABAN_ADRESS_IP_2/$AZKABAN_ADRESS_IP_2/g" ${workDir}/db/azkaban.sql
+       sed -i "s/AZKABAN_ADRESS_PORT/$AZKABAN_ADRESS_PORT/g" ${workDir}/db/azkaban.sql
+       mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -D$MYSQL_DB --default-character-set=utf8 -e "source ${workDir}/db/azkaban.sql"
+       isSuccess "source azkaban.sql"
+       #qualitis
+       if [ $QUALITIS_ADRESS_IP == "127.0.0.1" ];then
+           echo "QUALITIS_ADRESS_IP equals 127.0.0.1, changing it to the local ip address"
+           QUALITIS_ADRESS_IP_2=$LOCAL_IP
+       else
+           QUALITIS_ADRESS_IP_2=$QUALITIS_ADRESS_IP
+       fi
+       echo $QUALITIS_ADRESS_IP_2
+       sed -i "s/QUALITIS_ADRESS_IP_2/$QUALITIS_ADRESS_IP_2/g" ${workDir}/db/qualitis.sql
+       sed -i "s/QUALITIS_ADRESS_PORT/$QUALITIS_ADRESS_PORT/g" ${workDir}/db/qualitis.sql
+       mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD -D$MYSQL_DB --default-character-set=utf8 -e "source ${workDir}/db/qualitis.sql"
+       isSuccess "source qualitis.sql"
+   fi
 fi
 
 ##env check
@@ -304,8 +341,7 @@ isSuccess "install ${APPJOINTNAME}.zip"
 }
 ##function end
 
-##
-ver Install
+##dss-Server install
 PACKAGE_DIR=dss/dss-server
 SERVERNAME=dss-server
 SERVER_IP=$DSS_SERVER_INSTALL_IP
diff --git a/bin/start-all.sh b/bin/start-all.sh
index abc0e64ed9..df229abc82 100644
--- a/bin/start-all.sh
+++ b/bin/start-all.sh
@@ -76,6 +76,16 @@ sleep 15 #for Eureka register
 SERVER_NAME=dss-server
 SERVER_IP=$DSS_SERVER_INSTALL_IP
 startApp
+#MICRO_SERVICE_NAME=dss-server
+#MICRO_SERVICE_IP=$DSS_SERVER_INSTALL_IP
+#MICRO_SERVICE_PORT=$DSS_SERVER_PORT
+#sh $workDir/check.sh $MICRO_SERVICE_NAME $MICRO_SERVICE_IP $MICRO_SERVICE_PORT
+#state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
+#if [ $state -eq 0 ]; then
+#    echo ""
+#    echo "ERROR " $MICRO_SERVICE_NAME "is a critical service and must be guaranteed to be started !!!"
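+#    (this inline check is superseded by the checkMicro.sh-based health checks appended at the end of this script)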
+#    exit 1
+#fi
 
 #dss-flow-execution-entrance
 SERVER_NAME=dss-flow-execution-entrance
@@ -91,3 +101,41 @@
 SERVER_NAME=visualis-server
 SERVER_IP=$VISUALIS_SERVER_INSTALL_IP
 startApp
+
+echo "Start to check all DSS microservices"
+
+#check dss-server
+MICRO_SERVICE_NAME=dss-server
+MICRO_SERVICE_IP=$DSS_SERVER_INSTALL_IP
+MICRO_SERVICE_PORT=$DSS_SERVER_PORT
+sh $workDir/checkMicro.sh $MICRO_SERVICE_NAME $MICRO_SERVICE_IP $MICRO_SERVICE_PORT
+state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
+isSuccess "$MICRO_SERVICE_NAME start"
+
+
+#check dss-flow-execution-entrance
+MICRO_SERVICE_NAME=dss-flow-execution-entrance
+MICRO_SERVICE_IP=$FLOW_EXECUTION_INSTALL_IP
+MICRO_SERVICE_PORT=$FLOW_EXECUTION_PORT
+sh $workDir/checkMicro.sh $MICRO_SERVICE_NAME $MICRO_SERVICE_IP $MICRO_SERVICE_PORT
+state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
+isSuccess "$MICRO_SERVICE_NAME start"
+
+#check linkis-appjoint-entrance
+MICRO_SERVICE_NAME=linkis-appjoint-entrance
+MICRO_SERVICE_IP=$APPJOINT_ENTRANCE_INSTALL_IP
+MICRO_SERVICE_PORT=$APPJOINT_ENTRANCE_PORT
+sh $workDir/checkMicro.sh $MICRO_SERVICE_NAME $MICRO_SERVICE_IP $MICRO_SERVICE_PORT
+state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
+isSuccess "$MICRO_SERVICE_NAME start"
+
+
+#check visualis-server
+sleep 10 #for visualis-server
+MICRO_SERVICE_NAME=visualis-server
+MICRO_SERVICE_IP=$VISUALIS_SERVER_INSTALL_IP
+MICRO_SERVICE_PORT=$VISUALIS_SERVER_PORT
+sh $workDir/checkMicro.sh $MICRO_SERVICE_NAME $MICRO_SERVICE_IP $MICRO_SERVICE_PORT
+state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
+isSuccess "$MICRO_SERVICE_NAME start"
+
diff --git a/conf/config.sh b/conf/config.sh
index b5a00f706b..895c2c02c4 100644
--- a/conf/config.sh
+++ b/conf/config.sh
@@ -15,7 +15,7 @@ WORKSPACE_USER_ROOT_PATH=file:///tmp/linkis/
 RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis
 
 ### 1、DataCheck APPJOINT,This service is used to provide DataCheck capability.
-HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/linkis?characterEncoding=UTF-8
+HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/hivemeta?characterEncoding=UTF-8
 HIVE_META_USER=xxx
 HIVE_META_PASSWORD=xxx
 
@@ -50,16 +50,16 @@ GATEWAY_PORT=9001
 ### SSH Port
 SSH_PORT=22
 
-#for azkaban
+#Used to store the azkaban project transformed by DSS
 WDS_SCHEDULER_PATH=file:///appcom/tmp/wds/scheduler
 
 ###The IP address and port are written into the database here, so be sure to plan ahead
 ## visualis-server
 VISUALIS_SERVER_INSTALL_IP=127.0.0.1
 VISUALIS_SERVER_PORT=9007
-### visualis nginx acess ip
+### visualis nginx access ip, keep consistent with the DSS front end
 VISUALIS_NGINX_IP=0.0.0.0
-VISUALIS_NGINX_PORT=9009
+VISUALIS_NGINX_PORT=8088
 
 ### Eventchecker APPJOINT
 ### This service is used to provide Eventchecker capability. it's config in db.sh same as dss-server.
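The two seed files added next (db/azkaban.sql and db/qualitis.sql) are templates: the AZKABAN_ADRESS_* and QUALITIS_ADRESS_* placeholders inside them are rewritten by install.sh before the files are sourced into MySQL. A condensed sketch of that flow, pulled from the install.sh logic shown above — the variable values here are examples only; the real ones come from conf/config.sh and the db.sh connection settings:

```sh
#!/bin/sh
# Sketch of how install.sh materializes a placeholder SQL template.
# Example values only; install.sh reads these from conf/config.sh and db.sh.
AZKABAN_ADRESS_IP=127.0.0.1
AZKABAN_ADRESS_PORT=8081

# A loopback address would be written into dss_application and later served
# to browsers, so it is replaced with the real host IP (install.sh uses `hostname -i`).
if [ "$AZKABAN_ADRESS_IP" = "127.0.0.1" ]; then
    AZKABAN_ADRESS_IP_2=`hostname -i`
else
    AZKABAN_ADRESS_IP_2=$AZKABAN_ADRESS_IP
fi

# Substitute the placeholders in place, then source the file into the DSS database.
sed -i "s/AZKABAN_ADRESS_IP_2/$AZKABAN_ADRESS_IP_2/g" db/azkaban.sql
sed -i "s/AZKABAN_ADRESS_PORT/$AZKABAN_ADRESS_PORT/g" db/azkaban.sql
mysql -h$MYSQL_HOST -P$MYSQL_PORT -u$MYSQL_USER -p$MYSQL_PASSWORD \
      -D$MYSQL_DB --default-character-set=utf8 -e "source db/azkaban.sql"
```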
diff --git a/db/azkaban.sql b/db/azkaban.sql
new file mode 100644
index 0000000000..489ded7bb1
--- /dev/null
+++ b/db/azkaban.sql
@@ -0,0 +1,3 @@
+INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'schedulis', NULL, '0', '1', NULL, '0', NULL, NULL, '1', NULL, NULL);
+UPDATE `dss_application` SET url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT', project_url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT/manager?project=${projectName}',homepage_url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT/homepage' WHERE `name` in
+ ('schedulis');
diff --git a/db/dss_dml.sql b/db/dss_dml.sql
index 3ee9fca202..27146dedc4 100644
--- a/db/dss_dml.sql
+++ b/db/dss_dml.sql
@@ -1,6 +1,5 @@
 INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'linkis', null, '0', '1', NULL, '0', '/home', NULL, '0', '/home', NULL);
 INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'visualis', null, '0', '1', NULL, '0', NULL, NULL, '1', NULL, NULL);
-INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'schedulis', NULL, '0', '1', NULL, '0', NULL, NULL, '1', NULL, NULL);
 INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'workflow', null, '0', '1', NULL, '0', '/workflow', NULL, '0', '/project', NULL);
 INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'console', null, '0', '1', NULL, '0', '/console', NULL, '0', '/console', NULL);
@@ -30,7 +29,6 @@ INSERT INTO `dss_flow_taxonomy` (`id`, `name`, `description`, `creator_id`, `cre
 UPDATE `dss_application` SET url = 'http://GATEWAY_INSTALL_IP_2:GATEWAY_PORT' WHERE `name` in('linkis','workflow');
 UPDATE `dss_application` SET url = 'http://VISUALIS_NGINX_IP_2:VISUALIS_NGINX_PORT' WHERE `name` in('visualis');
 UPDATE `dss_application` SET project_url = 'http://VISUALIS_NGINX_IP_2:VISUALIS_NGINX_PORT/dss/visualis/#/project/${projectId}',homepage_url = 'http://VISUALIS_NGINX_IP_2:VISUALIS_NGINX_PORT/dss/visualis/#/projects' WHERE `name` in('visualis');
-UPDATE `dss_application` SET url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT', project_url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT/manager?project=${projectName}',homepage_url = 'http://AZKABAN_ADRESS_IP_2:AZKABAN_ADRESS_PORT/homepage' WHERE `name` in('schedulis');
 UPDATE `dss_workflow_node` SET jump_url = 'http://VISUALIS_NGINX_IP_2:VISUALIS_NGINX_PORT/dss/visualis/#/project/${projectId}/display/${nodeId}' where node_type = 'linkis.appjoint.visualis.display';
 UPDATE `dss_workflow_node` SET jump_url = 'http://VISUALIS_NGINX_IP_2:VISUALIS_NGINX_PORT/dss/visualis/#/project/${projectId}/portal/${nodeId}/portalName/${nodeName}' where node_type = 'linkis.appjoint.visualis.dashboard';
@@ -59,4 +57,4 @@ insert into `linkis_config_key_tree` VALUES(NULL,@key_id2,@tree_id1);
 insert into `linkis_config_key_tree` VALUES(NULL,@key_id3,@tree_id1);
 insert into `linkis_config_key_tree` VALUES(NULL,@key_id4,@tree_id1);
 insert into `linkis_config_key_tree` VALUES(NULL,@key_id5,@tree_id1);
-insert into `linkis_config_key_tree` VALUES(NULL,@key_id6,@tree_id2);
\ No newline at end of file
+insert into `linkis_config_key_tree` VALUES(NULL,@key_id6,@tree_id2);
diff --git a/db/qualitis.sql b/db/qualitis.sql
new file mode 100644
index 0000000000..04fb15cfb2
--- /dev/null
+++ b/db/qualitis.sql
@@ -0,0 +1,3 @@
+INSERT INTO `dss_application` (`id`, `name`, `url`, `is_user_need_init`, `level`, `user_init_url`, `exists_project_service`, `project_url`, `enhance_json`, `if_iframe`, `homepage_url`, `redirect_url`) VALUES (NULL, 'qualitis', 'http://QUALITIS_ADRESS_IP_2:QUALITIS_ADRESS_PORT', '0', '1', NULL, '1', 'http://QUALITIS_ADRESS_IP_2:QUALITIS_ADRESS_PORT/#/projects/list?id=${projectId}&flow=true', NULL, '1', 'http://QUALITIS_ADRESS_IP_2:QUALITIS_ADRESS_PORT/#/dashboard', 'http://QUALITIS_ADRESS_IP_2:QUALITIS_ADRESS_PORT/qualitis/api/v1/redirect');
+SELECT @qualitis_appid:=id from dss_application WHERE `name` = 'qualitis';
+INSERT INTO `dss_workflow_node` (`id`, `icon`, `node_type`, `application_id`, `submit_to_scheduler`, `enable_copy`, `should_creation_before_node`, `support_jump`, `jump_url`) VALUES (NULL, NULL, 'linkis.appjoint.qualitis', @qualitis_appid, NULL, '1', '0', '1', 'http://QUALITIS_ADRESS_IP_2:QUALITIS_ADRESS_PORT/#/addGroupTechniqueRule?tableType=1&id=${projectId}&ruleGroupId=${ruleGroupId}&nodeId=${nodeId}');
\ No newline at end of file
diff --git "a/docs/zh_CN/ch1/DSS\345\256\211\350\243\205\345\270\270\350\247\201\351\227\256\351\242\230\345\210\227\350\241\250.md" "b/docs/zh_CN/ch1/DSS\345\256\211\350\243\205\345\270\270\350\247\201\351\227\256\351\242\230\345\210\227\350\241\250.md"
new file mode 100644
index 0000000000..0b9e779828
--- /dev/null
+++ "b/docs/zh_CN/ch1/DSS\345\256\211\350\243\205\345\270\270\350\247\201\351\227\256\351\242\230\345\210\227\350\241\250.md"
@@ -0,0 +1,114 @@
+## DSS安装常见问题列表
+
+**本文档汇总DSS安装过程中的常见问题及解决方式,为社区用户安装DSS提供参考。**
+
+
+#### (1) 创建工程提示用户token为空
+
+```
+sudo vi dss-server/conf/token.properties
+```
+
+添加用户
+
+```
+xxx=xxx
+```
+
+#### (2) visualis执行报错,找不到driver驱动
+
+```
+Caused by: java.lang.Exception: /data/DSSInstall/visualis-server/bin/phantomjs is not executable!
+```
+
+下载 [driver驱动](https://phantomjs.org/download.html),把phantomjs二进制文件放入visualis-server的bin目录下即可。
+
+
+#### (3) dss-0.5.0简单版DSS创建工程失败
+
+
+删除数据库中表dss_application的schedulis和qualitis记录
+
+
+#### (4) DSS多次重复安装后报错:TooManyResultsException: Expected one result
+
+
+删除数据库中表linkis_user、dss_user、linkis_application中的重复记录
+
+#### (5) 访问前端出错
+
+```
+Unexpected token { in JSON at position 4
+```
+
+检查前端配置文件中linkis gateway的url配置
+
+#### (6) DSS创建工程报linkis.linkis_resources_task表不存在
+
+```
+Cause: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: Table 'linkis.linkis_resources_task' doesn't exist
+```
+
+进入mysql,选择linkis的数据库,手动执行linkis安装目录中的db/moudle/linkis-bml.sql文件。
+
+#### (7) 如何配置LDAP登录
+
+修改linkis-gateway配置目录下的linkis.properties文件,增加LDAP登录认证。
+
+
+#### (8) visualis可视化服务访问报错
+
+a) 确保visualis-server已经启动。
+
+b) 检查visualis-server安装目录下的application.yml配置,确保以下配置准确无误
+
+```
+    url: http://0.0.0.0:0000/dss/visualis  此处url中的IP和端口必须保持与DSS前端Nginx访问的IP地址和端口一致
+    access:
+        address: 0.0.0.0 #frontend address,此处保持与DSS前端Nginx访问IP地址一致
+        port: 0000 #frontend port,此处保持与DSS前端Nginx访问端口一致
+```
+
+c) 确保数据库表dss_application中visualis记录行,访问地址与DSS前端Nginx访问IP地址和端口一致。
+
+d) 访问visualis出现404错误,确保Nginx配置文件中关于visualis的访问路径配置正确。
+
+```
+    location /dss/visualis {
+        root   /data/DSSFront; # 示例visualis前端静态文件目录
+        autoindex on;
+    }
+    location / {
+        root   /data/DSSFront/dist; # 示例DSS前端静态文件目录
+        index  index.html;
+    }
+```
+
+
+#### (9) 上传文件大小限制
+
+```
+sudo vi /etc/nginx/nginx.conf
+```
+
+更改上传大小
+
+```
+client_max_body_size 200m
+```
+
+#### (10) 接口超时
+
+```
+sudo vi /etc/nginx/conf.d/dss.conf
+```
+
+
+更改接口超时时间
+
+```
+proxy_read_timeout 600s
+```
+
+**如果您在安装和使用DSS过程中遇到Linkis相关问题,请访问**
+[linkis常见问题列表](https://github.com/WeBankFinTech/Linkis/wiki/%E9%83%A8%E7%BD%B2%E5%92%8C%E7%BC%96%E8%AF%91%E9%97%AE%E9%A2%98%E6%80%BB%E7%BB%93)
diff --git "a/docs/zh_CN/ch2/DSS\345\277\253\351\200\237\345\256\211\350\243\205\344\275\277\347\224\250\346\226\207\346\241\243.md" "b/docs/zh_CN/ch2/DSS\345\277\253\351\200\237\345\256\211\350\243\205\344\275\277\347\224\250\346\226\207\346\241\243.md"
index c6b62101ef..1f63fdfed7 100644
--- "a/docs/zh_CN/ch2/DSS\345\277\253\351\200\237\345\256\211\350\243\205\344\275\277\347\224\250\346\226\207\346\241\243.md"
+++ "b/docs/zh_CN/ch2/DSS\345\277\253\351\200\237\345\256\211\350\243\205\344\275\277\347\224\250\346\226\207\346\241\243.md"
@@ -85,7 +85,7 @@ Nginx,[如何安装Nginx](https://www.tecmint.com/install-nginx-on-centos-7/)
     RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis # 结果集文件路径,用于存储Job的结果集文件
 
     #用于DATACHECK校验
-    HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/linkis?characterEncoding=UTF-8
+    HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/hivemeta?characterEncoding=UTF-8
     HIVE_META_USER=xxx
     HIVE_META_PASSWORD=xxx
@@ -98,7 +98,7 @@ Nginx,[如何安装Nginx](https://www.tecmint.com/install-nginx-on-centos-7/)
    ```
 
    ```properties
-    # 设置DSS-Server和Eventchecker AppJoint的数据库的连接信息。
+    # 设置DSS-Server和Eventchecker AppJoint的数据库的连接信息,需要和linkis保持同库
     MYSQL_HOST=
     MYSQL_PORT=
     MYSQL_DB=
@@ -201,10 +201,10 @@ Azkaban [如何安装Azkaban](https://github.com/azkaban/azkaban)
     RESULT_SET_ROOT_PATH=hdfs:///tmp/linkis # 结果集文件路径,用于存储Job的结果集文件
 
-    WDS_SCHEDULER_PATH=file:///appcom/tmp/wds/scheduler #Azkaban工程存储目录
+    WDS_SCHEDULER_PATH=file:///appcom/tmp/wds/scheduler #DSS工程转换成Azkaban工程后zip包的存储路径
 
     #1、用于DATACHECK
-    HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/linkis?characterEncoding=UTF-8
+    HIVE_META_URL=jdbc:mysql://127.0.0.1:3306/hivemeta?characterEncoding=UTF-8
     HIVE_META_USER=xxx
     HIVE_META_PASSWORD=xxx
     #2、用于Qualitis
@@ -223,7 +223,7 @@ Azkaban [如何安装Azkaban](https://github.com/azkaban/azkaban)
   ```
 
   ```properties
-    # 设置DSS-Server和Eventchecker AppJoint的数据库的连接信息。
+    # 设置DSS-Server和Eventchecker AppJoint的数据库的连接信息,需要和linkis保持同库
     MYSQL_HOST=
     MYSQL_PORT=
     MYSQL_DB=
@@ -341,7 +341,7 @@ dss_ipaddr=$(ip addr | awk '/^[0-9]+: / {}; /inet.*global/ {print gensub(/(.*)\/
 添加如下内容:
 ```
 server {
-    listen       8080;# 访问端口
+    listen       8088;# 访问端口
     server_name  localhost;
     #charset koi8-r;
     #access_log  /var/log/nginx/host.access.log  main;
@@ -395,65 +395,10 @@ server {
 
 ### 4.谷歌浏览器访问:
 ```http://nginx_ip:nginx_port```
 
-如何详细使用DSS, 点我进入 [DSS详细使用文档](https://github.com/WeBankFinTech/DataSphereStudio/blob/master/docs/zh_CN/ch3/DSS_User_Manual.md)
+**试用用户和密码均为部署用户,更多用户配置,详见** [Linkis LDAP](https://github.com/WeBankFinTech/Linkis/wiki/%E9%83%A8%E7%BD%B2%E5%92%8C%E7%BC%96%E8%AF%91%E9%97%AE%E9%A2%98%E6%80%BB%E7%BB%93)
 
-## 4.3、常见问题
-
-(1)用户token为空
-
-```
-sudo vi dss-server/conf/token.properties
-```
-
-添加用户
-
-```
-xxx=xxx
-```
-
-(2)visualis执行报错
-
-```
-Caused by: java.lang.Exception: /data/DSSInstall/visualis-server/bin/phantomjsis not executable!
-```
-
-下载 [driver驱动](https://phantomjs.org/download.html),把phantomjs二进制文件放入visualis-server的bin目录下即可。
-
-
-(3)简单版DSS创建工程失败
-
-
-删除数据库中表dss_application的schedulis和qualitis记录
-
-
-(4)多次重复安装后报错:TooManyResultsException:Expected on result
+如何详细使用DSS, 点我进入 [DSS快速使用文档](https://github.com/WeBankFinTech/DataSphereStudio/blob/master/docs/zh_CN/ch3/DSS_User_Manual.md)
+## 4.3、常见问题
 
-删除数据库中表linkis_user和dss_user中的重复记录
-
-(5)上传文件大小限制
-
-```
-sudo vi /etc/nginx/nginx.conf
-```
-
-更改上传大小
-
-```
-client_max_body_size 200m
-```
-
- (6)接口超时
-
-```
-sudo vi /etc/nginx/conf.d/dss.conf
-```
-
-
-更改接口超时时间
-
-```
-proxy_read_timeout 600s
-```
-
+[DSS安装常见问题](https://github.com/WeBankFinTech/DataSphereStudio/blob/master/docs/zh_CN/ch1/DSS%E5%AE%89%E8%A3%85%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98%E5%88%97%E8%A1%A8.md)
\ No newline at end of file
diff --git a/docs/zh_CN/ch3/DSS_User_Manual.md b/docs/zh_CN/ch3/DSS_User_Manual.md
index 470ae9c7c2..f366e5de31 100644
--- a/docs/zh_CN/ch3/DSS_User_Manual.md
+++ b/docs/zh_CN/ch3/DSS_User_Manual.md
@@ -1,3 +1,9 @@
+## 快速登录
+      为了方便用户使用,系统默认使用Linkis的部署用户名进行登录,比如以hadoop用户部署的,可以直接通过用户:hadoop,密码:hadoop(密码即用户名)登录。首先输入前端容器地址:192.168.xx.xx:8888,接着输入用户名和密码:hadoop/hadoop。
+![quick_start00](/images/zh_CN/chapter3/quickstart/quick_start00.png)
+
+__注意:__ 如果要支持多用户登录,DSS的用户登录依赖Linkis,需要在linkis-GateWay的配置里面进行配置,Linkis-GateWay默认支持LDAP。
+
 ## 1 功能简介
 
       DSS作为一站式数据应用开发门户,定位为闭环涵盖数据应用的全流程,满足从数据ETL、数据研发、可视化展现、数据治理、数据输出到工作流调度的数据应用全生命周期开发场景,现已经开源的组件包括如下图所示:
diff --git "a/docs/zh_CN/ch4/DSS\345\267\245\347\250\213\345\217\221\345\270\203\350\260\203\345\272\246\347\263\273\347\273\237\346\236\266\346\236\204\350\256\276\350\256\241.md" "b/docs/zh_CN/ch4/DSS\345\267\245\347\250\213\345\217\221\345\270\203\350\260\203\345\272\246\347\263\273\347\273\237\346\236\266\346\236\204\350\256\276\350\256\241.md"
new file mode 100644
index 0000000000..14dbccac55
--- /dev/null
+++ "b/docs/zh_CN/ch4/DSS\345\267\245\347\250\213\345\217\221\345\270\203\350\260\203\345\272\246\347\263\273\347\273\237\346\236\266\346\236\204\350\256\276\350\256\241.md"
@@ -0,0 +1,30 @@
+# DataSphere Studio发布调度系统架构设计
+
+
+
+## 一、背景
+
+目前在大数据领域存在许多种批量定时调度系统,如Azkaban、Airflow、EasyScheduler等,DSS支持将设计好的DAG工作流发布到不同的调度系统,系统默认支持了发布到Azkaban的实现。在DSS中主要完成工作流的编排设计、节点的参数设置、脚本代码编写、图表设计等需要交互式的操作,还可以在DSS中实时执行,并调试好所有节点的可执行代码。发布到调度系统后,由调度系统根据定时任务的配置,定时调度执行。
+
+## 二、架构设计
+
+![发布调度架构图](../../../images/zh_CN/charpter3/publish/publichtoscheduling.png)
+
+## 三、发布流程
+
+(1)从数据库读取最新版本的工程、工作流信息,获取所有保存在BML库的工作流JSON文件。
+
+(2)将上面的数据库内容,JSON文件内容分别转成DSS中的DWSProject,DWSFlow,如果存在子flow,则需要一并设置到flow中,保持原来的层级关系和依赖关系,构建好DWSProject,其中包含了工程下所有的DWSFlow。
+一个工作流JSON包含了所有节点的定义,并存储了节点之间的依赖关系,以及工作流自身的属性信息。
+
+(3)将DWSProject经过工程转换器转成SchedulerProject,转成SchedulerProject的过程中,同时完成了DWSJSONFlow到SchedulerFlow的转换,也完成了DWSNode到SchedulerNode的转换。
+
+(4)使用ProjectTuning对整个SchedulerProject工程进行tuning操作,用于完成工程发布前的整体调整操作,在Azkaban的实现中主要完成了工程的路径设置和工作流的存储路径设置。
+
+(5)ProjectPublishHook操作,hook可以根据不同的调度系统进行实现,且可分为发布前的hook和发布后的hook,这些都会被统一执行。
+发布前的hook包含对工程的解析,工作流的解析,节点的解析,以及生成对应的资源文件,属性文件,节点描述文件等。这个需要根据不同的调度系统进行实现。
+
+(6)发布工程,打包好经过转换、解析生成的工程目录文件,并上传到对应的调度系统。
diff --git a/dss-flow-execution-entrance/bin/start-dss-flow-execution-entrance.sh b/dss-flow-execution-entrance/bin/start-dss-flow-execution-entrance.sh
index 5ce15dc90f..9bd4a006cb 100644
--- a/dss-flow-execution-entrance/bin/start-dss-flow-execution-entrance.sh
+++ b/dss-flow-execution-entrance/bin/start-dss-flow-execution-entrance.sh
@@ -1,33 +1,49 @@
 #!/bin/bash
-
 cd `dirname $0`
 cd ..
 HOME=`pwd`
-export DWS_ENGINE_MANAGER_HOME=$HOME
-export DWS_ENGINE_MANAGER_PID=$HOME/bin/linkis.pid
+export SERVER_PID=$HOME/bin/linkis.pid
+export SERVER_LOG_PATH=$HOME/logs
+export SERVER_CLASS=com.webank.wedatasphere.linkis.DataWorkCloudApplication
 
-if [[ -f "${DWS_ENGINE_MANAGER_PID}" ]]; then
-    pid=$(cat ${DWS_ENGINE_MANAGER_PID})
-    if kill -0 ${pid} >/dev/null 2>&1; then
-      echo "FlowExecution Entrance is already running."
-      return 0;
-    fi
+if test -z "$SERVER_HEAP_SIZE"
+then
+    export SERVER_HEAP_SIZE="512M"
 fi
 
-export DWS_ENGINE_MANAGER_LOG_PATH=$HOME/logs
-export DWS_ENGINE_MANAGER_HEAP_SIZE="1G"
-export DWS_ENGINE_MANAGER_JAVA_OPTS="-Xms$DWS_ENGINE_MANAGER_HEAP_SIZE -Xmx$DWS_ENGINE_MANAGER_HEAP_SIZE -XX:+UseG1GC -XX:MaxPermSize=500m -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=11730"
+if test -z "$SERVER_JAVA_OPTS"
+then
+    export SERVER_JAVA_OPTS=" -Xmx$SERVER_HEAP_SIZE -XX:+UseG1GC -Xloggc:$HOME/logs/linkis-gc.log"
+fi
 
-echo $HOME/lib/
+if [[ -f "${SERVER_PID}" ]]; then
+    pid=$(cat ${SERVER_PID})
+    if kill -0 ${pid} >/dev/null 2>&1; then
+      echo "Server is already running."
+      exit 1
+    fi
+fi
 
-nohup java $DWS_ENGINE_MANAGER_JAVA_OPTS -cp $HOME/conf:$HOME/lib/* com.webank.wedatasphere.linkis.DataWorkCloudApplication 2>&1 > $DWS_ENGINE_MANAGER_LOG_PATH/linkis.out &
+nohup java $SERVER_JAVA_OPTS -cp $HOME/conf:$HOME/lib/* $SERVER_CLASS 2>&1 > $SERVER_LOG_PATH/linkis.out &
 pid=$!
 if [[ -z "${pid}" ]]; then
-    echo "FlowExecution Entrance start failed!"
+    echo "server $SERVER_NAME start failed!"
     exit 1
 else
-    echo "FlowExecution Entrance start succeeded!"
-    echo $pid > $DWS_ENGINE_MANAGER_PID
+    echo "server $SERVER_NAME start succeeded!"
+    echo $pid > $SERVER_PID
     sleep 1
 fi
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dss-server/bin/start-dss-server.sh b/dss-server/bin/start-dss-server.sh
index e53e44a167..518cd8da17 100644
--- a/dss-server/bin/start-dss-server.sh
+++ b/dss-server/bin/start-dss-server.sh
@@ -1,33 +1,39 @@
 #!/bin/bash
-
 cd `dirname $0`
 cd ..
 HOME=`pwd`
 
-export DWS_ENGINE_MANAGER_HOME=$HOME
-export DWS_ENGINE_MANAGER_PID=$HOME/bin/linkis.pid
+export SERVER_PID=$HOME/bin/linkis.pid
+export SERVER_LOG_PATH=$HOME/logs
+export SERVER_CLASS=com.webank.wedatasphere.dss.DSSSpringApplication
+
+if test -z "$SERVER_HEAP_SIZE"
+then
+    export SERVER_HEAP_SIZE="512M"
+fi
+
+if test -z "$SERVER_JAVA_OPTS"
+then
+    export SERVER_JAVA_OPTS=" -Xmx$SERVER_HEAP_SIZE -XX:+UseG1GC -Xloggc:$HOME/logs/linkis-gc.log"
+fi
 
-if [[ -f "${DWS_ENGINE_MANAGER_PID}" ]]; then
-    pid=$(cat ${DWS_ENGINE_MANAGER_PID})
+if [[ -f "${SERVER_PID}" ]]; then
+    pid=$(cat ${SERVER_PID})
     if kill -0 ${pid} >/dev/null 2>&1; then
-      echo "DSS SERVER is already running."
-      return 0;
+      echo "Server is already running."
+      exit 1
     fi
 fi
 
-export DWS_ENGINE_MANAGER_LOG_PATH=$HOME/logs
-export DWS_ENGINE_MANAGER_HEAP_SIZE="1G"
-export DWS_ENGINE_MANAGER_JAVA_OPTS="-Xms$DWS_ENGINE_MANAGER_HEAP_SIZE -Xmx$DWS_ENGINE_MANAGER_HEAP_SIZE -XX:+UseG1GC -XX:MaxPermSize=500m -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=11729"
-
-nohup java $DWS_ENGINE_MANAGER_JAVA_OPTS -cp $HOME/conf:$HOME/lib/* com.webank.wedatasphere.dss.DSSSpringApplication 2>&1 > $DWS_ENGINE_MANAGER_LOG_PATH/linkis.out &
+nohup java $SERVER_JAVA_OPTS -cp $HOME/conf:$HOME/lib/* $SERVER_CLASS 2>&1 > $SERVER_LOG_PATH/linkis.out &
 pid=$!
 if [[ -z "${pid}" ]]; then
-    echo "DSS SERVER start failed!"
-    sleep 1
+    echo "server $SERVER_NAME start failed!"
     exit 1
 else
-    echo "DSS SERVER start succeeded!"
-    echo $pid > $DWS_ENGINE_MANAGER_PID
+    echo "server $SERVER_NAME start succeeded!"
+    echo $pid > $SERVER_PID
     sleep 1
 fi
-exit 1
+
+
diff --git a/plugins/linkis/linkis-appjoint-entrance/bin/start-linkis-appjoint-entrance.sh b/plugins/linkis/linkis-appjoint-entrance/bin/start-linkis-appjoint-entrance.sh
index 0128a4d1cd..4436def35d 100644
--- a/plugins/linkis/linkis-appjoint-entrance/bin/start-linkis-appjoint-entrance.sh
+++ b/plugins/linkis/linkis-appjoint-entrance/bin/start-linkis-appjoint-entrance.sh
@@ -1,36 +1,37 @@
 #!/bin/bash
-
 cd `dirname $0`
 cd ..
-HOE=`pwd`
- export DWS_ENTRANCE_HOE=$HOE
+HOME=`pwd`
 
-export DWS_ENTRANCE_PID=$HOE/bin/linkis-appjoint-entrance.pid
+export SERVER_PID=$HOME/bin/linkis.pid
+export SERVER_LOG_PATH=$HOME/logs
+export SERVER_CLASS=com.webank.wedatasphere.linkis.DataWorkCloudApplication
 
-if [[ -f "${DWS_ENTRANCE_PID}" ]]; then
-    pid=$(cat ${DWS_ENTRANCE_PID})
-    if kill -0 ${pid} >/dev/null 2>&1; then
-      echo "Entrance is already running."
-      return 0;
-    fi
+if test -z "$SERVER_HEAP_SIZE"
+then
+    export SERVER_HEAP_SIZE="512M"
 fi
 
-export DWS_ENTRANCE_LOG_PATH=$HOE/logs
-export DWS_ENTRANCE_DEBUG="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=19959"
-export DWS_ENTRANCE_HEAP_SIZE="2G"
-export DWS_ENTRANCE_JAVA_OPTS="-Xms$DWS_ENTRANCE_HEAP_SIZE -Xmx$DWS_ENTRANCE_HEAP_SIZE -XX:+UseG1GC -XX:MaxPermSize=500m $DWS_ENTRANCE_DEBUG"
-
-cmd="nohup java $DWS_ENTRANCE_JAVA_OPTS -cp $HOE/conf:$HOE/lib/* com.webank.wedatasphere.linkis.DataWorkCloudApplication 2>&1 > $DWS_ENTRANCE_LOG_PATH/linkis.out &"
-#echo "CMD IS $cmd"
+if test -z "$SERVER_JAVA_OPTS"
+then
+    export SERVER_JAVA_OPTS=" -Xmx$SERVER_HEAP_SIZE -XX:+UseG1GC -Xloggc:$HOME/logs/linkis-gc.log"
+fi
+if [[ -f "${SERVER_PID}" ]]; then
+    pid=$(cat ${SERVER_PID})
+    if kill -0 ${pid} >/dev/null 2>&1; then
+      echo "Server is already running."
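+      # the PID file points at a live process, so refuse to start a second instance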
+      exit 1
+    fi
+fi
 
-nohup java $DWS_ENTRANCE_JAVA_OPTS -cp $HOE/conf:$HOE/lib/* com.webank.wedatasphere.linkis.DataWorkCloudApplication 2>&1 > $DWS_ENTRANCE_LOG_PATH/linkis.out &
+nohup java $SERVER_JAVA_OPTS -cp $HOME/conf:$HOME/lib/* $SERVER_CLASS 2>&1 > $SERVER_LOG_PATH/linkis.out &
 pid=$!
 if [[ -z "${pid}" ]]; then
-    echo "AppJoint Entrance start failed!"
+    echo "server $SERVER_NAME start failed!"
     exit 1
 else
-    echo "AppJoint Entrance start succeeded!"
-    echo $pid > $DWS_ENTRANCE_PID
+    echo "server $SERVER_NAME start succeeded!"
+    echo $pid > $SERVER_PID
     sleep 1
 fi
diff --git a/plugins/linkis/linkis-appjoint-entrance/bin/stop-linkis-appjoint-entrance.sh b/plugins/linkis/linkis-appjoint-entrance/bin/stop-linkis-appjoint-entrance.sh
index f3aad1635d..7d47032507 100644
--- a/plugins/linkis/linkis-appjoint-entrance/bin/stop-linkis-appjoint-entrance.sh
+++ b/plugins/linkis/linkis-appjoint-entrance/bin/stop-linkis-appjoint-entrance.sh
@@ -4,7 +4,7 @@ cd `dirname $0`
 cd ..
 HOE=`pwd`
 
-export DWS_ENTRANCE_PID=$HOE/bin/linkis-appjoint-entrance.pid
+export DWS_ENTRANCE_PID=$HOE/bin/linkis.pid
 
 function wait_for_DWS_ENGINE_MANAGER_to_die() {
   local pid
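One note on the new health checks: start-all.sh invokes a `$workDir/checkMicro.sh` helper that is not included in this diff. Based purely on how it is called (service name, IP, port as positional arguments) and on the telnet probe start-all.sh already uses inline, a plausible minimal sketch — an assumption, not the actual script — would be:

```sh
#!/bin/sh
# Hypothetical sketch of the checkMicro.sh helper called by start-all.sh;
# the real script is not part of this diff, so names and messages are assumed.
MICRO_SERVICE_NAME=$1
MICRO_SERVICE_IP=$2
MICRO_SERVICE_PORT=$3

# Same probe start-all.sh uses inline: a successful telnet handshake
# prints a line containing "Connected".
state=`echo -e "\n" | telnet $MICRO_SERVICE_IP $MICRO_SERVICE_PORT 2>/dev/null | grep Connected | wc -l`
if [ "$state" -eq 0 ]; then
    echo "ERROR: $MICRO_SERVICE_NAME is not reachable on $MICRO_SERVICE_IP:$MICRO_SERVICE_PORT"
    exit 1
fi
echo "$MICRO_SERVICE_NAME is up on $MICRO_SERVICE_IP:$MICRO_SERVICE_PORT"
```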