diff --git a/cmd/admin/admin.go b/cmd/admin/admin.go new file mode 100644 index 00000000..c2cc8f51 --- /dev/null +++ b/cmd/admin/admin.go @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package admin + +import ( + "context" + "fmt" + "os" + "path/filepath" +) + +import ( + "github.com/spf13/cobra" +) + +import ( + "github.com/arana-db/arana/cmd/cmds" + "github.com/arana-db/arana/pkg/admin" + _ "github.com/arana-db/arana/pkg/admin/router" + "github.com/arana-db/arana/pkg/boot" + "github.com/arana-db/arana/pkg/constants" + "github.com/arana-db/arana/pkg/util/log" +) + +const ( + _keyPort = "port" + _defaultPort = 8080 +) + +func init() { + cmd := &cobra.Command{ + Use: "admin", + Short: "admin", + Example: "arana admin -c bootstrap.yaml -p 8080", + RunE: run, + } + cmd.PersistentFlags(). + StringP(constants.ConfigPathKey, "c", os.Getenv(constants.EnvBootstrapPath), "bootstrap configuration file path") + cmd.PersistentFlags(). + Uint16P(_keyPort, "p", _defaultPort, "listen port") + + cmds.Handle(func(root *cobra.Command) { + root.AddCommand(cmd) + }) +} + +func Run(bootstrapPath string, addr string) error { + discovery := boot.NewDiscovery(bootstrapPath) + if err := discovery.Init(context.Background()); err != nil { + log.Fatal("start admin api server failed: %v", err) + return err + } + adminServer := admin.New(discovery) + return adminServer.Listen(addr) +} + +func run(cmd *cobra.Command, args []string) error { + _ = args + btPath, _ := cmd.PersistentFlags().GetString(constants.ConfigPathKey) + port, _ := cmd.PersistentFlags().GetUint16("port") + if len(btPath) < 1 { + // search bootstrap yaml + for _, path := range constants.GetConfigSearchPathList() { + btPath = filepath.Join(path, "bootstrap.yaml") + if _, err := os.Stat(btPath); err == nil { + break + } + btPath = filepath.Join(path, "bootstrap.yml") + if _, err := os.Stat(btPath); err == nil { + break + } + } + } + + return Run(btPath, fmt.Sprintf(":%d", port)) +} diff --git a/cmd/main.go b/cmd/main.go index ad35125d..edbc1f21 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -22,6 +22,7 @@ import ( ) import ( + _ "github.com/arana-db/arana/cmd/admin" "github.com/arana-db/arana/cmd/cmds" _ "github.com/arana-db/arana/cmd/start" _ "github.com/arana-db/arana/cmd/tools" diff --git a/cmd/start/start.go b/cmd/start/start.go index 2b4b0ad8..523e1e00 100644 --- a/cmd/start/start.go +++ b/cmd/start/start.go @@ -33,7 +33,6 @@ import ( import ( "github.com/arana-db/arana/cmd/cmds" "github.com/arana-db/arana/pkg/boot" - "github.com/arana-db/arana/pkg/config" "github.com/arana-db/arana/pkg/constants" "github.com/arana-db/arana/pkg/executor" "github.com/arana-db/arana/pkg/mysql" @@ -78,37 +77,26 @@ func Run(bootstrapConfigPath string, importPath string) { // print slogan 
fmt.Printf("\033[92m%s\033[0m\n", slogan) // 92m: light green - provider := boot.NewProvider(bootstrapConfigPath) - if err := provider.Init(context.Background()); err != nil { + discovery := boot.NewDiscovery(bootstrapConfigPath) + if err := discovery.Init(context.Background()); err != nil { log.Fatal("start failed: %v", err) return } if len(importPath) > 0 { - c, err := config.Load(importPath) - if err != nil { - log.Fatal("failed to import configuration from %s: %v", importPath, err) - return - } - if err := provider.GetConfigCenter().ImportConfiguration(c); err != nil { - log.Fatal("failed to import configuration from %s: %v", importPath, err) + if !boot.RunImport(bootstrapConfigPath, importPath) { return } } - if err := boot.Boot(context.Background(), provider); err != nil { + if err := boot.Boot(context.Background(), discovery); err != nil { log.Fatal("start failed: %v", err) return } propeller := server.NewServer() - listenersConf, err := provider.ListListeners(context.Background()) - if err != nil { - log.Fatal("start failed: %v", err) - return - } - + listenersConf := discovery.ListListeners(context.Background()) for _, listenerConf := range listenersConf { listener, err := mysql.NewListener(listenerConf) if err != nil { diff --git a/cmd/tools/tools.go b/cmd/tools/tools.go index 195da51f..46f59c84 100644 --- a/cmd/tools/tools.go +++ b/cmd/tools/tools.go @@ -18,7 +18,6 @@ package tools import ( - "context" "os" ) @@ -29,9 +28,7 @@ import ( import ( "github.com/arana-db/arana/cmd/cmds" "github.com/arana-db/arana/pkg/boot" - "github.com/arana-db/arana/pkg/config" "github.com/arana-db/arana/pkg/constants" - "github.com/arana-db/arana/pkg/util/log" ) var ( @@ -45,7 +42,7 @@ func init() { Use: "import", Short: "import arana config", Example: "./arana import -c ../docker/conf/bootstrap.yaml -s ../docker/conf/config.yaml", - Run: Run, + Run: run, } cmd.PersistentFlags(). @@ -58,25 +55,10 @@ func init() { }) } -func Run(cmd *cobra.Command, args []string) { - _, _ = cmd, args - - provider := boot.NewProvider(importBootConfPath) - if err := provider.Init(context.Background()); err != nil { - log.Fatal("init failed: %+v", err) - return - } - - cfg, err := config.Load(sourceConfigPath) - if err != nil { - log.Fatal("load config from %s failed: %+v", sourceConfigPath, err) - return - } - - c := provider.GetConfigCenter() +func run(_ *cobra.Command, _ []string) { + Run(importBootConfPath, sourceConfigPath) +} - if err := c.ImportConfiguration(cfg); err != nil { - log.Fatal("persist config to config.store failed: %+v", err) - return - } +func Run(importConfPath, configPath string) { + boot.RunImport(importConfPath, configPath) } diff --git a/conf/bootstrap.local-etcd.yaml b/conf/bootstrap.local-etcd.yaml new file mode 100644 index 00000000..f460765f --- /dev/null +++ b/conf/bootstrap.local-etcd.yaml @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +config: + name: etcd + options: + endpoints: "http://127.0.0.1:2379" diff --git a/conf/bootstrap.yaml b/conf/bootstrap.yaml index dd15f743..4042c45c 100644 --- a/conf/bootstrap.yaml +++ b/conf/bootstrap.yaml @@ -15,19 +15,29 @@ # limitations under the License. # +kind: ConfigMap +apiVersion: "1.0" +listeners: + - protocol_type: mysql + server_version: 5.7.0 + socket_address: + address: 0.0.0.0 + port: 13306 config: name: file options: - # name: etcd - # options: - # endpoints: "http://localhost:2379" - # name: nacos - # options: - # endpoints: "localhost:8080" - # namespace: arana - # group: arana - # contextPath: /nacos - # scheme: http - # username: nacos - # password: nacos \ No newline at end of file +# name: etcd +# root_path: arana +# options: +# endpoints: "http://127.0.0.1:2379" + +# name: nacos +# options: +# endpoints: "127.0.0.1:8848" +# namespace: arana +# group: arana +# contextPath: /nacos +# scheme: http +# username: nacos +# password: nacos diff --git a/conf/config.yaml b/conf/config.yaml index f5b016fe..eade8b61 100644 --- a/conf/config.yaml +++ b/conf/config.yaml @@ -20,13 +20,6 @@ apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: @@ -34,107 +27,92 @@ data: password: "123456" - username: arana password: "123456" - - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - parameters: - max_allowed_packet: 256M - groups: - - name: employees_0000 - nodes: - - name: node0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - parameters: - - name: node0_r_0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000_r - weight: r0w0 - - name: employees_0001 - nodes: - - name: node1 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - - name: employees_0002 - nodes: - - name: node2 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0002 - weight: r10w10 - - name: employees_0003 - nodes: - - name: node3 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0003 - weight: r10w10 - - name: employees_shadow - nodes: - - name: node_shadow - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_show - weight: r10w10 - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - sequence: - type: snowflake - option: - db_rules: - - column: uid - type: scriptExpr - expr: parseInt($value % 32 / 8) - tbl_rules: - - column: uid - type: scriptExpr - expr: $value % 32 - step: 32 - topology: - db_pattern: employees_${0000..0003} - tbl_pattern: student_${0000..0031} - attributes: - sqlMaxLimit: -1 - - shadow_rule: - tables: - - name: student - enable: false - group_node: employees_shadow - match_rules: - - operation: [insert,update] - match_type: value - attributes: + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + parameters: + max_allowed_packet: 256M + groups: + - name: employees_0000 + nodes: + - node0 + - node0_r_0 + - name: employees_0001 + nodes: + - node1 + - name: employees_0002 + nodes: + - node2 + - name: employees_0003 + nodes: + - node3 + sharding_rule: + tables: + - 
name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: - column: uid - value: 10000 - - operation: [delete] - match_type: regex - attributes: - - column: name - regex: "^hanmeimei$" - - operation: [select] - match_type: hint + type: scriptExpr + expr: parseInt($value % 32 / 8) + tbl_rules: + - column: uid + type: scriptExpr + expr: $value % 32 + step: 32 + topology: + db_pattern: employees_${0000..0003} + tbl_pattern: student_${0000..0031} attributes: - - shadow: true + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000 + weight: r10w10 + parameters: + node0_r_0: + name: node0_r_0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000_r + weight: r0w0 + parameters: + node1: + name: node1 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0001 + weight: r10w10 + parameters: + node2: + name: node2 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0002 + weight: r10w10 + parameters: + node3: + name: node3 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0003 + weight: r10w10 + parameters: + diff --git a/docker-compose.yaml b/docker-compose.yaml index ab8a26c2..38c81555 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -52,7 +52,6 @@ services: retries: 5 arana: - build: . container_name: arana image: aranadb/arana:master entrypoint: [ "arana", "start", "-c", "/etc/arana/bootstrap.yaml", "--import", "/etc/arana/config.yaml" ] @@ -64,10 +63,23 @@ services: - ./conf/config.yaml:/etc/arana/config.yaml:ro - ./conf/bootstrap.docker.yaml:/etc/arana/bootstrap.yaml:ro depends_on: - - arana-admin - mysql - etcd + arana-admin: + container_name: arana-admin + image: aranadb/arana:master + entrypoint: [ "arana", "admin", "-c", "/etc/arana/bootstrap.yaml", "-p", "8088" ] + networks: + - local + ports: + - "8088:8088" + volumes: + - ./conf/config.yaml:/etc/arana/config.yaml:ro + - ./conf/bootstrap.docker.yaml:/etc/arana/bootstrap.yaml:ro + depends_on: + - etcd + networks: local: external: false diff --git a/example/admin_server/main.go b/example/admin_server/main.go new file mode 100644 index 00000000..9778e0c6 --- /dev/null +++ b/example/admin_server/main.go @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "github.com/arana-db/arana/cmd/admin" + "github.com/arana-db/arana/testdata" +) + +func main() { + bootstrap := testdata.Path("../conf/bootstrap.local-etcd.yaml") + _ = admin.Run(bootstrap, ":8080") +} diff --git a/example/import_config/main.go b/example/import_config/main.go new file mode 100644 index 00000000..6e2d882d --- /dev/null +++ b/example/import_config/main.go @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "github.com/arana-db/arana/cmd/tools" + "github.com/arana-db/arana/testdata" +) + +func main() { + bootstrap := testdata.Path("../conf/bootstrap.yaml") + config := testdata.Path("../conf/config.yaml") + tools.Run(bootstrap, config) +} diff --git a/go.mod b/go.mod index 30b4bc37..841b92f5 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,8 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 github.com/dop251/goja v0.0.0-20220422102209-3faab1d8f20e github.com/dubbogo/gost v1.12.3 - github.com/go-playground/validator/v10 v10.10.1 + github.com/gin-gonic/gin v1.8.1 + github.com/go-playground/validator/v10 v10.11.0 github.com/go-sql-driver/mysql v1.6.0 github.com/golang/mock v1.5.0 github.com/hashicorp/golang-lru v0.5.4 @@ -19,7 +20,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/spf13/cobra v1.2.1 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 github.com/testcontainers/testcontainers-go v0.12.0 github.com/tidwall/gjson v1.14.0 go.etcd.io/etcd/api/v3 v3.5.1 @@ -32,7 +33,7 @@ require ( golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/text v0.3.7 - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b + gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -55,12 +56,14 @@ require ( github.com/docker/go-units v0.4.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect + github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/goccy/go-json v0.9.10 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -74,20 +77,22 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect - github.com/json-iterator/go v1.1.11 // 
indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/magiconair/properties v1.8.5 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v1.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.2 // indirect github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 // indirect github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -100,6 +105,7 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect + github.com/ugorji/go/codec v1.2.7 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect go.etcd.io/bbolt v1.3.5 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect @@ -108,13 +114,13 @@ require ( go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect - golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect - golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect + golang.org/x/net v0.0.0-20220725212005-46097bf591d3 // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247 // indirect google.golang.org/grpc v1.42.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.62.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 10e0f92a..f7563824 100644 --- a/go.sum +++ b/go.sum @@ -335,6 +335,10 @@ github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JY github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= 
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -369,8 +373,8 @@ github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.10.1 h1:uA0+amWMiglNZKZ9FJRKUAe9U3RX91eVn1JYXMWt7ig= -github.com/go-playground/validator/v10 v10.10.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= @@ -379,6 +383,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.9.10 h1:hCeNmprSNLB8B8vQKWl6DpuH0t60oEs+TAk9a7CScKc= +github.com/goccy/go-json v0.9.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -555,8 +561,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -610,6 +617,8 @@ github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 
h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -644,8 +653,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -726,6 +736,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -867,8 +879,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -893,6 +906,9 @@ github.com/tmc/grpc-websocket-proxy 
v0.0.0-20200427203606-3cfed13b9966/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/toolkits/concurrent v0.0.0-20150624120057-a4371d70e3e3/go.mod h1:QDlpd3qS71vYtakd2hmdpqhJ9nwv6mD6A30bQ1BPBFE= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -996,8 +1012,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1090,8 +1107,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3 h1:2yWTtPWWRcISTw3/o+s/Y4UOMnQL71DWyToOANFusCg= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1204,13 +1222,15 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1420,8 +1440,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1460,8 +1481,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git 
a/integration_test/config/db/config.yaml b/integration_test/config/db/config.yaml index 458751b9..8637ff65 100644 --- a/integration_test/config/db/config.yaml +++ b/integration_test/config/db/config.yaml @@ -20,13 +20,6 @@ apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: @@ -34,68 +27,75 @@ data: password: "123456" - username: arana password: "123456" - - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - groups: - - name: employees_0000 - nodes: - - name: node0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - - name: employees_0001 - nodes: - - name: node1 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - - name: employees_0002 - nodes: - - name: node2 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0002 - weight: r10w10 - - name: employees_0003 - nodes: - - name: node3 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0003 - weight: r10w10 - - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - sequence: - type: snowflake - option: - db_rules: - - column: uid - type: scriptExpr - expr: parseInt($value % 32 / 8) - step: 32 - tbl_rules: - - column: uid - type: scriptExpr - expr: parseInt(0) - topology: - db_pattern: employees_${0000...0003} - tbl_pattern: student_0000 - attributes: - sqlMaxLimit: -1 + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + groups: + - name: employees_0000 + nodes: + - node0 + - name: employees_0001 + nodes: + - node1 + - name: employees_0002 + nodes: + - node2 + - name: employees_0003 + nodes: + - node3 + sharding_rule: + tables: + - name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: + - column: uid + type: scriptExpr + expr: parseInt($value % 32 / 8) + step: 32 + tbl_rules: + - column: uid + type: scriptExpr + expr: parseInt(0) + topology: + db_pattern: employees_${0000...0003} + tbl_pattern: student_0000 + attributes: + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000 + weight: r10w10 + node1: + name: node1 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0001 + weight: r10w10 + node2: + name: node2 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0002 + weight: r10w10 + node3: + name: node3 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0003 + weight: r10w10 diff --git a/integration_test/config/db_tbl/config.yaml b/integration_test/config/db_tbl/config.yaml index b3d9702a..d697962a 100644 --- a/integration_test/config/db_tbl/config.yaml +++ b/integration_test/config/db_tbl/config.yaml @@ -20,13 +20,6 @@ apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: @@ -34,74 +27,83 @@ data: password: "123456" - username: arana password: "123456" - - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - groups: - - name: employees_0000 - nodes: - - name: node0 - host: 
arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - - name: node0_r_0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000_r - weight: r0w0 - - name: employees_0001 - nodes: - - name: node1 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - - name: employees_0002 - nodes: - - name: node2 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0002 - weight: r10w10 - - name: employees_0003 - nodes: - - name: node3 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0003 - weight: r10w10 - - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - sequence: - type: snowflake - option: - db_rules: - - column: uid - type: scriptExpr - expr: parseInt($value % 32 / 8) - tbl_rules: - - column: uid - type: scriptExpr - expr: $value % 32 - topology: - db_pattern: employees_${0000..0003} - tbl_pattern: student_${0000..0031} - attributes: - sqlMaxLimit: -1 + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + groups: + - name: employees_0000 + nodes: + - node0 + - node0_r_0 + - name: employees_0001 + nodes: + - node1 + - name: employees_0002 + nodes: + - node2 + - name: employees_0003 + nodes: + - node3 + sharding_rule: + tables: + - name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: + - column: uid + type: scriptExpr + expr: parseInt($value % 32 / 8) + tbl_rules: + - column: uid + type: scriptExpr + expr: $value % 32 + topology: + db_pattern: employees_${0000..0003} + tbl_pattern: student_${0000..0031} + attributes: + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000 + weight: r10w10 + node0_r_0: + name: node0_r_0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000_r + weight: r0w0 + node1: + name: node1 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0001 + weight: r10w10 + node2: + name: node2 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0002 + weight: r10w10 + node3: + name: node3 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0003 + weight: r10w10 diff --git a/integration_test/config/tbl/config.yaml b/integration_test/config/tbl/config.yaml index b4698084..2cee24b6 100644 --- a/integration_test/config/tbl/config.yaml +++ b/integration_test/config/tbl/config.yaml @@ -20,13 +20,6 @@ apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: @@ -34,40 +27,41 @@ data: password: "123456" - username: arana password: "123456" - - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - groups: - - name: employees_0000 - nodes: - - name: node0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - sequence: - type: snowflake - option: - db_rules: - - column: uid - type: scriptExpr - expr: parseInt(0) - tbl_rules: - - column: uid - type: scriptExpr - expr: $value % 32 - topology: - 
db_pattern: employees_0000 - tbl_pattern: student_${0000..0031} - attributes: - sqlMaxLimit: -1 + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + groups: + - name: employees_0000 + nodes: + - node0 + sharding_rule: + tables: + - name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: + - column: uid + type: scriptExpr + expr: parseInt(0) + tbl_rules: + - column: uid + type: scriptExpr + expr: $value % 32 + topology: + db_pattern: employees_0000 + tbl_pattern: student_${0000..0031} + attributes: + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000 + weight: r10w10 diff --git a/pkg/admin/admin.api.yaml b/pkg/admin/admin.api.yaml new file mode 100644 index 00000000..25060888 --- /dev/null +++ b/pkg/admin/admin.api.yaml @@ -0,0 +1,361 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +openapi: 3.0.3 +info: + title: Arana + description: Arana + version: 1.0.0 +servers: + - url: 'http://127.0.0.1:8080/' +paths: + /tenants: + get: + operationId: listTenants + summary: List all tenants + responses: + '200': + description: All Tenants + content: + application/json: + schema: + $ref: '#/components/schemas/Tenants' + + post: + operationId: createTenant + summary: Create a tenant + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tenant' + responses: + '201': + description: OK + + /tenants/{tenantName}: + get: + operationId: getTenant + summary: Get a tenant + responses: + '200': + description: Single Tenant + content: + application/json: + schema: + $ref: '#/components/schemas/Tenant' + delete: + operationId: deleteTenant + summary: Delete a tenant + responses: + '204': + description: NONE + put: + operationId: putTenant + summary: Update a tenant + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Tenant' + responses: + '200': + description: OK + + /tenants/{tenantName}/nodes: + get: + operationId: listNodes + summary: List mysql nodes + responses: + '200': + description: All MySQL Nodes + content: + application/json: + schema: + $ref: '#/components/schemas/Nodes' + + post: + operationId: createNode + summary: Create mysql node + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + responses: + '200': + description: OK + + /tenants/{tenantName}/nodes/{nodeName}: + get: + operationId: getNode + summary: Get a mysql node + responses: + '200': + description: Single MySQL Node + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + delete: + operationId: deleteNode + summary: Delete a mysql node + responses: + '204': + description: NONE + + put: + operationId: putNode + 
summary: Update a mysql node + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Node' + responses: + '200': + description: OK + + /tenants/{tenantName}/groups: + post: + operationId: createGroup + summary: Create a DB group + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Group' + responses: + '201': + description: OK + + get: + operationId: listGroups + summary: List all DB groups + responses: + '200': + description: All groups + content: + application/json: + schema: + $ref: '#/components/schemas/Groups' + + /tenants/{tenantName}/groups/{groupName}: + get: + operationId: getGroup + summary: Get a DB group + responses: + '200': + description: Single DB group + content: + application/json: + schema: + $ref: '#/components/schemas/Group' + put: + operationId: putGroup + summary: Update a DB group + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Group' + responses: + '200': + description: OK + + delete: + operationId: deleteGroup + summary: Delete a DB group + responses: + '204': + description: NONE + + /tenants/{tenantName}/clusters: + post: + operationId: createCluster + summary: Create a cluster + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Cluster' + responses: + '200': + description: OK + + get: + operationId: listClusters + summary: List all clusters + responses: + '200': + description: All Clusters + content: + application/json: + schema: + $ref: '#/components/schemas/Clusters' + + /tenants/{tenantName}/clusters/{clusterName}: + get: + operationId: getCluster + summary: Get a cluster + responses: + '200': + description: Single Cluster + content: + application/json: + schema: + $ref: '#/components/schemas/Cluster' + + put: + operationId: putCluster + summary: Update a cluster + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Cluster' + responses: + '200': + description: OK + + delete: + operationId: deleteCluster + summary: Delete a cluster + responses: + '204': + description: NONE + +components: + schemas: + Tenant: + type: object + properties: + name: + type: string + users: + type: array + items: + type: object + properties: + username: + type: string + password: + type: string + example: + name: "foobar" + users: + - username: "tom" + password: "12345678" + - username: "john" + password: "12345678" + Tenants: + type: array + items: + $ref: '#/components/schemas/Tenant' + + Node: + type: object + required: + - name + - host + - username + - password + - database + - weight + properties: + name: + type: string + host: + type: string + port: + type: integer + username: + type: string + password: + type: string + database: + type: string + weight: + type: string + parameters: + type: object + example: + name: mysql-axfwq87 + host: 1.2.3.4 + port: 3306 + username: root + password: 12345678 + database: employees_0000 + weight: r10w10 + + Nodes: + type: array + items: + $ref: '#/components/schemas/Node' + + Group: + type: object + required: + - name + - nodes + properties: + name: + type: string + nodes: + type: array + items: + type: string + example: + name: employees_0000 + nodes: + - mysql-fwijfo8 + - mysql-we7nvil + - mysql-vjm24if + + Groups: + type: array + items: + $ref: '#/components/schemas/Group' + + Cluster: + type: object + properties: + name: + type: string + type: + type: string + groups: + type: array + items: + type: string + example: + name: employees + type: mysql + groups: + - 
employees_0000 + - employees_0001 + - employees_0002 + - employees_0003 + - employees_0004 + - employees_0005 + - employees_0006 + - employees_0007 + + Clusters: + type: array + items: + $ref: '#/components/schemas/Cluster' diff --git a/pkg/admin/admin.go b/pkg/admin/admin.go new file mode 100644 index 00000000..0951329c --- /dev/null +++ b/pkg/admin/admin.go @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package admin + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "os" + "strings" +) + +import ( + "github.com/gin-gonic/gin" + + perrors "github.com/pkg/errors" + + uatomic "go.uber.org/atomic" +) + +import ( + "github.com/arana-db/arana/pkg/boot" + "github.com/arana-db/arana/pkg/constants" +) + +const K = "ARANA_ADMIN_SERVICE" + +var NotFoundError = errors.New("resource not found") + +var _hooks []Hook + +type Hook func(router gin.IRoutes) + +func Register(hook Hook) { + _hooks = append(_hooks, hook) +} + +func init() { + switch strings.ToLower(os.Getenv(constants.EnvDevelopEnvironment)) { + case "1", "true", "yes", "on": + gin.SetMode(gin.DebugMode) + default: + gin.SetMode(gin.ReleaseMode) + } +} + +type Service = boot.ConfigProvider + +type Server struct { + l net.Listener + engine *gin.Engine + service Service + started uatomic.Bool +} + +func New(service Service) *Server { + return &Server{ + service: service, + engine: gin.New(), + } +} + +func (srv *Server) Close() error { + if srv.l != nil { + return srv.l.Close() + } + return nil +} + +func (srv *Server) Listen(addr string) error { + if !srv.started.CAS(false, true) { + return io.EOF + } + + var ( + c net.ListenConfig + err error + ) + + srv.engine.Use(func(c *gin.Context) { + c.Set(K, srv.service) + c.Next() + }) + srv.engine.Use(gin.Logger()) + srv.engine.Use(gin.Recovery()) + srv.engine.Use( + ErrorHandler( + Map(NotFoundError). + ToResponse(func(c *gin.Context, err error) { + c.Status(http.StatusNotFound) + _, _ = c.Writer.WriteString(err.Error()) + }), + )) + + for _, hook := range _hooks { + hook(srv.engine) + } + + if srv.l, err = c.Listen(context.Background(), "tcp", addr); err != nil { + return perrors.WithStack(err) + } + return srv.engine.RunListener(srv.l) +} + +// GetService returns Service from gin context. +func GetService(c *gin.Context) Service { + v, _ := c.Get(K) + return v.(Service) +} diff --git a/pkg/admin/middleware.go b/pkg/admin/middleware.go new file mode 100644 index 00000000..8282d69a --- /dev/null +++ b/pkg/admin/middleware.go @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package admin + +import ( + "reflect" +) + +import ( + "github.com/gin-gonic/gin" +) + +// ErrorHandler is middleware that enables you to configure error handling from a centralized place via its fluent API. +// Copyright: https://github.com/josephwoodward/gin-errorhandling +func ErrorHandler(errMap ...*errorMapping) gin.HandlerFunc { + return func(context *gin.Context) { + context.Next() + + lastErr := context.Errors.Last() + if lastErr == nil { + return + } + + for _, e := range errMap { + for _, e2 := range e.fromErrors { + if lastErr.Err == e2 { + e.toResponse(context, lastErr.Err) + } else if isType(lastErr.Err, e2) { + e.toResponse(context, lastErr.Err) + } + } + } + } +} + +func isType(a, b interface{}) bool { + return reflect.TypeOf(a) == reflect.TypeOf(b) +} + +type errorMapping struct { + fromErrors []error + toStatusCode int + toResponse func(ctx *gin.Context, err error) +} + +// ToStatusCode specifies the status code returned to a caller when the error is handled. +func (r *errorMapping) ToStatusCode(statusCode int) *errorMapping { + r.toStatusCode = statusCode + r.toResponse = func(ctx *gin.Context, err error) { + ctx.Status(statusCode) + } + return r +} + +// ToResponse provides more control over the returned response when an error is matched. +func (r *errorMapping) ToResponse(response func(ctx *gin.Context, err error)) *errorMapping { + r.toResponse = response + return r +} + +// Map enables you to map errors to a given response status code or response body. +func Map(err ...error) *errorMapping { + return &errorMapping{ + fromErrors: err, + } +} diff --git a/pkg/admin/router/clusters.go b/pkg/admin/router/clusters.go new file mode 100644 index 00000000..c7cd6259 --- /dev/null +++ b/pkg/admin/router/clusters.go @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package router + +import ( + "context" + "net/http" +) + +import ( + "github.com/gin-gonic/gin" +) + +import ( + "github.com/arana-db/arana/pkg/admin" + "github.com/arana-db/arana/pkg/boot" +) + +func init() { + admin.Register(func(router gin.IRoutes) { + router.GET("/tenants/:tenant/clusters", ListClusters) + router.POST("/tenants/:tenant/clusters", CreateCluster) + router.GET("/tenants/:tenant/clusters/:cluster", GetCluster) + router.PUT("/tenants/:tenant/clusters/:cluster", UpdateCluster) + router.DELETE("/tenants/:tenant/clusters/:cluster", RemoveCluster) + }) +} + +func ListClusters(c *gin.Context) { + service := admin.GetService(c) + tenantName := c.Param("tenant") + clusters, err := service.ListClusters(context.Background(), tenantName) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, clusters) +} + +func GetCluster(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + cluster := c.Param("cluster") + data, err := service.GetCluster(context.Background(), tenant, cluster) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, data) +} + +func CreateCluster(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + var cluster *boot.ClusterBody + if err := c.ShouldBindJSON(&cluster); err == nil { + //TODO how to get cluster name? + err := service.UpsertCluster(context.Background(), tenant, "", cluster) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func UpdateCluster(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + cluster := c.Param("cluster") + var clusterBody *boot.ClusterBody + if err := c.ShouldBindJSON(&clusterBody); err == nil { + err := service.UpsertCluster(context.Background(), tenant, cluster, clusterBody) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func RemoveCluster(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + cluster := c.Param("cluster") + err := service.RemoveCluster(context.Background(), tenant, cluster) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) +} diff --git a/pkg/admin/router/db_groups.go b/pkg/admin/router/db_groups.go new file mode 100644 index 00000000..e94c2953 --- /dev/null +++ b/pkg/admin/router/db_groups.go @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package router + +import ( + "context" + "net/http" +) + +import ( + "github.com/gin-gonic/gin" +) + +import ( + "github.com/arana-db/arana/pkg/admin" + "github.com/arana-db/arana/pkg/boot" +) + +func init() { + admin.Register(func(router gin.IRoutes) { + router.POST("/tenants/:tenant/groups", CreateGroup) + router.GET("/tenants/:tenant/groups", ListGroups) + router.GET("/tenants/:tenant/groups/:group", GetGroup) + router.PUT("/tenants/:tenant/groups/:group", UpdateGroup) + router.DELETE("/tenants/:tenant/groups/:group", RemoveGroup) + }) +} + +func CreateGroup(c *gin.Context) { + service := admin.GetService(c) + tenantName := c.Param("tenant") + var group *boot.GroupBody + if err := c.ShouldBindJSON(&group); err == nil { + err := service.UpsertGroup(context.Background(), tenantName, "", "", group) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func ListGroups(c *gin.Context) { + service := admin.GetService(c) + tenantName := c.Param("tenant") + cluster := c.Param("cluster") + groups, err := service.ListGroups(context.Background(), tenantName, cluster) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, groups) +} + +func GetGroup(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + group := c.Param("group") + data, err := service.GetGroup(context.Background(), tenant, "", group) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, data) +} + +func UpdateGroup(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + group := c.Param("group") + var groupBody *boot.GroupBody + if err := c.ShouldBindJSON(&groupBody); err == nil { + err := service.UpsertGroup(context.Background(), tenant, "", group, groupBody) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func RemoveGroup(c *gin.Context) { + service := admin.GetService(c) + tenant, group := c.Param("tenant"), c.Param("group") + + err := service.RemoveGroup(context.Background(), tenant, "", group) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) +} diff --git a/pkg/admin/router/nodes.go b/pkg/admin/router/nodes.go new file mode 100644 index 00000000..9aac63f1 --- /dev/null +++ b/pkg/admin/router/nodes.go @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
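Each file in pkg/admin/router follows the same pattern: an init() hook hands its routes to admin.Register, and handlers fetch the service from the gin context. A sketch of how a further, purely hypothetical endpoint would plug in (the /health route is not part of this patch):

package router

import (
	"net/http"
)

import (
	"github.com/gin-gonic/gin"
)

import (
	"github.com/arana-db/arana/pkg/admin"
)

func init() {
	// Register the hypothetical route with the shared admin router.
	admin.Register(func(r gin.IRoutes) {
		r.GET("/health", func(c *gin.Context) {
			c.JSON(http.StatusOK, gin.H{"status": "ok"})
		})
	})
}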
+ */ + +package router + +import ( + "net/http" +) + +import ( + "github.com/gin-gonic/gin" +) + +import ( + "github.com/arana-db/arana/pkg/admin" + "github.com/arana-db/arana/pkg/boot" + "github.com/arana-db/arana/pkg/config" +) + +func init() { + admin.Register(func(router gin.IRoutes) { + router.GET("/tenants/:tenant/nodes", ListNodes) + router.POST("/tenants/:tenant/nodes", CreateNode) + router.GET("/tenants/:tenant/nodes/:node", GetNode) + router.PUT("/tenants/:tenant/nodes/:node", UpdateNode) + router.DELETE("/tenants/:tenant/nodes/:node", RemoveNode) + }) +} + +func ListNodes(c *gin.Context) { + var results []config.Node + service := admin.GetService(c) + tenantName := c.Param("tenant") + clusters, err := service.ListClusters(c, tenantName) + if err != nil { + _ = c.Error(err) + return + } + for _, cluster := range clusters { + groups, err := service.ListGroups(c, tenantName, cluster) + if err != nil { + _ = c.Error(err) + return + } + for _, group := range groups { + nodesArray, err := service.ListNodes(c, tenantName, cluster, group) + if err != nil { + _ = c.Error(err) + return + } + for _, node := range nodesArray { + result, err := service.GetNode(c, tenantName, cluster, group, node) + if err != nil { + _ = c.Error(err) + return + } + results = append(results, *result) + } + } + } + c.JSON(http.StatusOK, results) +} + +func GetNode(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + node := c.Param("node") + clusters, err := service.ListClusters(c, tenant) + if err != nil { + _ = c.Error(err) + return + } + var data *config.Node + for _, cluster := range clusters { + groups, err := service.ListGroups(c, tenant, cluster) + if err != nil { + _ = c.Error(err) + return + } + for _, group := range groups { + data, err = service.GetNode(c, tenant, cluster, group, node) + if err != nil { + _ = c.Error(err) + continue + } + } + } + c.JSON(http.StatusOK, data) +} + +func CreateNode(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + var node *boot.NodeBody + if err := c.ShouldBindJSON(&node); err == nil { + err := service.UpsertNode(c, tenant, "", node) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func UpdateNode(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + node := c.Param("node") + var nodeBody *boot.NodeBody + if err := c.ShouldBindJSON(&nodeBody); err == nil { + err := service.UpsertNode(c, tenant, node, nodeBody) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func RemoveNode(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + node := c.Param("node") + err := service.RemoveNode(c, tenant, node) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusNoContent, nil) +} diff --git a/pkg/admin/router/tenants.go b/pkg/admin/router/tenants.go new file mode 100644 index 00000000..53bd740e --- /dev/null +++ b/pkg/admin/router/tenants.go @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package router + +import ( + "context" + "net/http" +) + +import ( + "github.com/gin-gonic/gin" +) + +import ( + "github.com/arana-db/arana/pkg/admin" + "github.com/arana-db/arana/pkg/boot" +) + +func init() { + admin.Register(func(router gin.IRoutes) { + router.GET("/tenants", ListTenants) + router.POST("/tenants", CreateTenant) + router.GET("/tenants/:tenant", GetTenant) + router.PUT("/tenants/:tenant", UpdateTenant) + router.DELETE("/tenants/:tenant", RemoveTenant) + }) +} + +func ListTenants(c *gin.Context) { + service := admin.GetService(c) + tenants, err := service.ListTenants(context.Background()) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, tenants) +} + +func CreateTenant(c *gin.Context) { + service := admin.GetService(c) + var tenantBody *boot.TenantBody + if err := c.ShouldBindJSON(&tenantBody); err == nil { + err := service.UpsertTenant(context.Background(), "", tenantBody) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusCreated, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func GetTenant(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + data, err := service.GetTenant(context.Background(), tenant) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, data) +} + +func UpdateTenant(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + var tenantBody *boot.TenantBody + if err := c.ShouldBindJSON(&tenantBody); err == nil { + err := service.UpsertTenant(context.Background(), tenant, tenantBody) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusOK, nil) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} + +func RemoveTenant(c *gin.Context) { + service := admin.GetService(c) + tenant := c.Param("tenant") + err := service.RemoveTenant(context.Background(), tenant) + if err != nil { + _ = c.Error(err) + return + } + c.JSON(http.StatusNoContent, nil) +} diff --git a/pkg/boot/boot.go b/pkg/boot/boot.go index 683518f5..47ffe1d3 100644 --- a/pkg/boot/boot.go +++ b/pkg/boot/boot.go @@ -29,7 +29,6 @@ import ( "github.com/arana-db/arana/pkg/config" "github.com/arana-db/arana/pkg/proto/rule" "github.com/arana-db/arana/pkg/runtime" - rcontext "github.com/arana-db/arana/pkg/runtime/context" "github.com/arana-db/arana/pkg/runtime/namespace" _ "github.com/arana-db/arana/pkg/schema" "github.com/arana-db/arana/pkg/security" @@ -41,62 +40,62 @@ func Boot(ctx context.Context, provider Discovery) error { return err } - clusters, err := provider.ListClusters(ctx) - if err != nil { - return err + var ( + err error + tenants []string + ) + if tenants, err = provider.ListTenants(ctx); err != nil { + return errors.Wrap(err, "no tenants found") } - for _, cluster := range clusters { - var ( - c *Cluster - ns *namespace.Namespace - ) - - if c, err = provider.GetCluster(ctx, cluster); err != nil { - continue 
+ for _, tenant := range tenants { + clusters, err := provider.ListClusters(ctx, tenant) + if err != nil { + return err } - ctx = rcontext.WithTenant(ctx, c.Tenant) + for _, cluster := range clusters { + var ( + ns *namespace.Namespace + ) - if ns, err = buildNamespace(ctx, provider, cluster); err != nil { - log.Errorf("build namespace %s failed: %v", cluster, err) - continue - } - if err = namespace.Register(ns); err != nil { - log.Errorf("register namespace %s failed: %v", cluster, err) - continue - } - log.Infof("register namespace %s successfully", cluster) - security.DefaultTenantManager().PutCluster(c.Tenant, cluster) - } + if _, err = provider.GetCluster(ctx, tenant, cluster); err != nil { + continue + } - var tenants []string - if tenants, err = provider.ListTenants(ctx); err != nil { - return errors.Wrap(err, "no tenants found") - } + if ns, err = buildNamespace(ctx, tenant, provider, cluster); err != nil { + log.Errorf("build namespace %s failed: %v", cluster, err) + continue + } + if err = namespace.Register(ns); err != nil { + log.Errorf("register namespace %s failed: %v", cluster, err) + continue + } + log.Infof("register namespace %s successfully", cluster) + security.DefaultTenantManager().PutCluster(tenant, cluster) + } - for _, tenant := range tenants { - var t *config.Tenant - if t, err = provider.GetTenant(ctx, tenant); err != nil { + var users config.Users + if users, err = provider.ListUsers(ctx, tenant); err != nil { log.Errorf("failed to get tenant %s: %v", tenant, err) continue } - for _, it := range t.Users { - security.DefaultTenantManager().PutUser(tenant, it) + for i := range users { + security.DefaultTenantManager().PutUser(tenant, users[i]) } } return nil } -func buildNamespace(ctx context.Context, provider Discovery, clusterName string) (*namespace.Namespace, error) { +func buildNamespace(ctx context.Context, tenant string, provider Discovery, clusterName string) (*namespace.Namespace, error) { var ( cluster *config.DataSourceCluster groups []string err error ) - cluster, err = provider.GetDataSourceCluster(ctx, clusterName) + cluster, err = provider.GetDataSourceCluster(ctx, tenant, clusterName) if err != nil { return nil, err } @@ -105,19 +104,19 @@ func buildNamespace(ctx context.Context, provider Discovery, clusterName string) parameters = cluster.Parameters } - if groups, err = provider.ListGroups(ctx, clusterName); err != nil { + if groups, err = provider.ListGroups(ctx, tenant, clusterName); err != nil { return nil, err } var initCmds []namespace.Command for _, group := range groups { var nodes []string - if nodes, err = provider.ListNodes(ctx, clusterName, group); err != nil { + if nodes, err = provider.ListNodes(ctx, tenant, clusterName, group); err != nil { return nil, err } for _, it := range nodes { var node *config.Node - if node, err = provider.GetNode(ctx, clusterName, group, it); err != nil { + if node, err = provider.GetNode(ctx, tenant, clusterName, group, it); err != nil { return nil, errors.WithStack(err) } if node.Parameters == nil { @@ -129,14 +128,14 @@ func buildNamespace(ctx context.Context, provider Discovery, clusterName string) } var tables []string - if tables, err = provider.ListTables(ctx, clusterName); err != nil { + if tables, err = provider.ListTables(ctx, tenant, clusterName); err != nil { return nil, errors.WithStack(err) } var ru rule.Rule for _, table := range tables { var vt *rule.VTable - if vt, err = provider.GetTable(ctx, clusterName, table); err != nil { + if vt, err = provider.GetTable(ctx, tenant, clusterName, 
table); err != nil { return nil, err } if vt == nil { diff --git a/pkg/boot/discovery.go b/pkg/boot/discovery.go index 2b409978..90fb8dea 100644 --- a/pkg/boot/discovery.go +++ b/pkg/boot/discovery.go @@ -23,7 +23,6 @@ import ( "io/ioutil" "path/filepath" "regexp" - "sort" "strconv" "strings" "sync" @@ -57,6 +56,11 @@ var ( _regexpRuleExprSync sync.Once ) +var ( + ErrorNoTenant = errors.New("no tenant") + ErrorNoDataSourceCluster = errors.New("no datasourceCluster") +) + func getTableRegexp() *regexp.Regexp { _regexpTableOnce.Do(func() { _regexpTable = regexp.MustCompile("([a-zA-Z0-9\\-_]+)\\.([a-zA-Z0-9\\\\-_]+)") @@ -71,50 +75,82 @@ func getRuleExprRegexp() *regexp.Regexp { return _regexpRuleExpr } -type Cluster struct { - Tenant string - Type config.DataSourceType +type discovery struct { + inited uatomic.Bool + path string + options *BootOptions + + tenantOp config.TenantOperator + centers map[string]config.Center } -type Discovery interface { - // Init initializes discovery with context - Init(ctx context.Context) error - // ListTenants list tenants name - ListTenants(ctx context.Context) ([]string, error) - // GetTenant returns the tenant info - GetTenant(ctx context.Context, tenant string) (*config.Tenant, error) +func (fp *discovery) UpsertTenant(ctx context.Context, tenant string, body *TenantBody) error { + //TODO implement me + panic("implement me") +} - // ListListeners lists the listener names - ListListeners(ctx context.Context) ([]*config.Listener, error) +func (fp *discovery) RemoveTenant(ctx context.Context, tenant string) error { + //TODO implement me + panic("implement me") +} - // ListClusters lists the cluster names. - ListClusters(ctx context.Context) ([]string, error) - // GetClusterObject returns the dataSourceCluster object - GetDataSourceCluster(ctx context.Context, cluster string) (*config.DataSourceCluster, error) - // GetCluster returns the cluster info - GetCluster(ctx context.Context, cluster string) (*Cluster, error) - // ListGroups lists the group names. - ListGroups(ctx context.Context, cluster string) ([]string, error) +func (fp *discovery) UpsertCluster(ctx context.Context, tenant, cluster string, body *ClusterBody) error { + //TODO implement me + panic("implement me") +} - // ListNodes lists the node names. - ListNodes(ctx context.Context, cluster, group string) ([]string, error) - // GetNode returns the node info. - GetNode(ctx context.Context, cluster, group, node string) (*config.Node, error) +func (fp *discovery) RemoveCluster(ctx context.Context, tenant, cluster string) error { + //TODO implement me + panic("implement me") +} - // ListTables lists the table names. - ListTables(ctx context.Context, cluster string) ([]string, error) - // GetTable returns the table info. 
- GetTable(ctx context.Context, cluster, table string) (*rule.VTable, error) +func (fp *discovery) UpsertNode(ctx context.Context, tenant, node string, body *NodeBody) error { + //TODO implement me + panic("implement me") +} - // GetConfigCenter - GetConfigCenter() *config.Center +func (fp *discovery) RemoveNode(ctx context.Context, tenant, node string) error { + //TODO implement me + panic("implement me") } -type discovery struct { - inited uatomic.Bool - path string - options *BootOptions - c *config.Center +func (fp *discovery) UpsertGroup(ctx context.Context, tenant, cluster, group string, body *GroupBody) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) RemoveGroup(ctx context.Context, tenant, cluster, group string) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) BindNode(ctx context.Context, tenant, cluster, group, node string) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) UnbindNode(ctx context.Context, tenant, cluster, group, node string) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) UpsertTable(ctx context.Context, tenant, cluster, table string, body *TableBody) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) RemoveTable(ctx context.Context, tenant, cluster, table string) error { + //TODO implement me + panic("implement me") +} + +func (fp *discovery) Import(ctx context.Context, info *config.Tenant) error { + op, ok := fp.centers[info.Name] + if !ok { + return ErrorNoTenant + } + + return op.Import(ctx, info) } func (fp *discovery) Init(ctx context.Context) error { @@ -122,128 +158,153 @@ func (fp *discovery) Init(ctx context.Context) error { return nil } - if err := fp.loadBootOptions(); err != nil { + cfg, err := LoadBootOptions(fp.path) + if err != nil { return err } + fp.options = cfg - if err := fp.initConfigCenter(); err != nil { + if err := config.Init(*fp.options.Config, fp.options.Spec.APIVersion); err != nil { return err } + fp.tenantOp, err = config.NewTenantOperator(config.GetStoreOperate()) + if err != nil { + return err + } + if err := fp.initAllConfigCenter(); err != nil { + return err + } return nil } -func (fp *discovery) loadBootOptions() error { - content, err := ioutil.ReadFile(fp.path) +func LoadBootOptions(path string) (*BootOptions, error) { + content, err := ioutil.ReadFile(path) if err != nil { err = errors.Wrap(err, "failed to load config") - return err + return nil, err } - if !file.IsYaml(fp.path) { - err = errors.Errorf("invalid config file format: %s", filepath.Ext(fp.path)) - return err + if !file.IsYaml(path) { + err = errors.Errorf("invalid config file format: %s", filepath.Ext(path)) + return nil, err } var cfg BootOptions if err = yaml.Unmarshal(content, &cfg); err != nil { err = errors.Wrapf(err, "failed to unmarshal config") - return err + return nil, err } - fp.options = &cfg - return nil + return &cfg, nil } -func (fp *discovery) initConfigCenter() error { - c, err := config.NewCenter(*fp.options.Config) - if err != nil { - return err - } +func (fp *discovery) initAllConfigCenter() error { + + tenants := fp.tenantOp.ListTenants() + for i := range tenants { + tenant := tenants[i] + + options := *fp.options.Config + if len(options.Options) == 0 { + options.Options = map[string]interface{}{} + } + options.Options["tenant"] = tenant - fp.c = c + fp.centers[tenant] = config.NewCenter(tenant, config.GetStoreOperate()) + } return nil } -func (fp *discovery) GetConfigCenter() 
*config.Center { - return fp.c +func (fp *discovery) GetDataSourceCluster(ctx context.Context, tenant, cluster string) (*config.DataSourceCluster, error) { + dataSourceCluster, err := fp.loadCluster(tenant, cluster) + if err != nil { + return nil, err + } + return dataSourceCluster, nil } -func (fp *discovery) GetDataSourceCluster(ctx context.Context, cluster string) (*config.DataSourceCluster, error) { - dataSourceCluster, ok := fp.loadCluster(cluster) +func (fp *discovery) GetGroup(ctx context.Context, tenant, cluster, group string) (*config.Group, error) { + exist, ok := fp.loadGroup(tenant, cluster, group) if !ok { return nil, nil } - return dataSourceCluster, nil + + return exist, nil } -func (fp *discovery) GetCluster(ctx context.Context, cluster string) (*Cluster, error) { - exist, ok := fp.loadCluster(cluster) - if !ok { - return nil, nil +func (fp *discovery) GetCluster(ctx context.Context, tenant, cluster string) (*Cluster, error) { + exist, err := fp.loadCluster(tenant, cluster) + if err != nil { + return nil, err } return &Cluster{ - Tenant: exist.Tenant, - Type: exist.Type, + Type: exist.Type, }, nil } func (fp *discovery) ListTenants(ctx context.Context) ([]string, error) { - cfg, err := fp.c.Load() - if err != nil { - return nil, err - } - var tenants []string - for _, it := range cfg.Data.Tenants { - tenants = append(tenants, it.Name) - } - return tenants, nil + return fp.tenantOp.ListTenants(), nil } func (fp *discovery) GetTenant(ctx context.Context, tenant string) (*config.Tenant, error) { - cfg, err := fp.c.Load() + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + cfg, err := op.Load(context.Background()) if err != nil { return nil, err } - for _, it := range cfg.Data.Tenants { - if it.Name == tenant { - return it, nil - } - } - return nil, nil + return cfg, nil } -func (fp *discovery) ListListeners(ctx context.Context) ([]*config.Listener, error) { - cfg, err := fp.c.Load() +func (fp *discovery) ListUsers(ctx context.Context, tenant string) (config.Users, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + cfg, err := op.Load(context.Background()) if err != nil { return nil, err } - return cfg.Data.Listeners, nil + return cfg.Users, nil +} + +func (fp *discovery) ListListeners(ctx context.Context) []*config.Listener { + return fp.options.Listeners } -func (fp *discovery) ListClusters(ctx context.Context) ([]string, error) { - cfg, err := fp.c.Load() +func (fp *discovery) ListClusters(ctx context.Context, tenant string) ([]string, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + cfg, err := op.Load(context.Background()) if err != nil { return nil, err } - clusters := make([]string, 0, len(cfg.Data.DataSourceClusters)) - for _, it := range cfg.Data.DataSourceClusters { - clusters = append(clusters, it.Name) - } + ret := make([]string, 0, 4) - return clusters, nil + for _, it := range cfg.DataSourceClusters { + ret = append(ret, it.Name) + } + return ret, nil } -func (fp *discovery) ListGroups(ctx context.Context, cluster string) ([]string, error) { - bingo, ok := fp.loadCluster(cluster) - if !ok { - return nil, nil +func (fp *discovery) ListGroups(ctx context.Context, tenant, cluster string) ([]string, error) { + bingo, err := fp.loadCluster(tenant, cluster) + if err != nil { + return nil, err } groups := make([]string, 0, len(bingo.Groups)) for _, it := range bingo.Groups { @@ -253,54 +314,90 @@ func (fp *discovery) ListGroups(ctx context.Context, cluster string) 
([]string, return groups, nil } -func (fp *discovery) ListNodes(ctx context.Context, cluster, group string) ([]string, error) { - bingo, ok := fp.loadGroup(cluster, group) +func (fp *discovery) ListNodes(ctx context.Context, tenant, cluster, group string) ([]string, error) { + + bingo, ok := fp.loadGroup(tenant, cluster, group) if !ok { return nil, nil } var nodes []string - for _, it := range bingo.Nodes { - nodes = append(nodes, it.Name) + for i := range bingo.Nodes { + nodes = append(nodes, bingo.Nodes[i]) } return nodes, nil } -func (fp *discovery) ListTables(ctx context.Context, cluster string) ([]string, error) { - cfg, err := fp.c.Load() +func (fp *discovery) ListTables(ctx context.Context, tenant, cluster string) ([]string, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + cfg, err := op.Load(context.Background()) if err != nil { return nil, err } - var tables []string - for tb := range fp.loadTables(cfg, cluster) { + rule := cfg.ShardingRule + tables := make([]string, 0, 4) + + for i := range rule.Tables { + db, tb, err := parseTable(rule.Tables[i].Name) + if err != nil { + return nil, err + } + if db != cluster { + continue + } + tables = append(tables, tb) } - sort.Strings(tables) + return tables, nil } -func (fp *discovery) GetNode(ctx context.Context, cluster, group, node string) (*config.Node, error) { - bingo, ok := fp.loadGroup(cluster, group) +func (fp *discovery) GetNode(ctx context.Context, tenant, cluster, group, node string) (*config.Node, error) { + + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + var nodeId string + + bingo, ok := fp.loadGroup(tenant, cluster, group) if !ok { return nil, nil } - for _, it := range bingo.Nodes { - if it.Name == node { - return it, nil + + for i := range bingo.Nodes { + if bingo.Nodes[i] == node { + nodeId = node + break } } - return nil, nil -} -func (fp *discovery) GetTable(ctx context.Context, cluster, tableName string) (*rule.VTable, error) { - cfg, err := fp.c.Load() + if nodeId == "" { + return nil, nil + } + + nodes, err := fp.loadNodes(op) if err != nil { return nil, err } - table, ok := fp.loadTables(cfg, cluster)[tableName] + return nodes[nodeId], nil +} + +func (fp *discovery) GetTable(ctx context.Context, tenant, cluster, tableName string) (*rule.VTable, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + table, ok := fp.loadTables(cluster, op)[tableName] if !ok { return nil, nil } @@ -311,6 +408,7 @@ func (fp *discovery) GetTable(ctx context.Context, cluster, tableName string) (* dbFormat, tbFormat string dbBegin, tbBegin int dbEnd, tbEnd int + err error ) if table.Topology != nil { @@ -448,23 +546,37 @@ func (fp *discovery) GetTable(ctx context.Context, cluster, tableName string) (* return &vt, nil } -func (fp *discovery) loadCluster(cluster string) (*config.DataSourceCluster, bool) { - cfg, err := fp.c.Load() +func (fp *discovery) loadCluster(tenant, cluster string) (*config.DataSourceCluster, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, ErrorNoTenant + } + + cfg, err := op.Load(context.Background()) if err != nil { - return nil, false + return nil, err } - for _, it := range cfg.Data.DataSourceClusters { + for _, it := range cfg.DataSourceClusters { if it.Name == cluster { - return it, true + return it, nil } } - return nil, false + return nil, ErrorNoDataSourceCluster } -func (fp *discovery) loadGroup(cluster, group string) (*config.Group, bool) { - bingo, ok := fp.loadCluster(cluster) - if !ok { 
+func (fp *discovery) loadNodes(op config.Center) (config.Nodes, error) { + cfg, err := op.Load(context.Background()) + if err != nil { + return nil, err + } + + return cfg.Nodes, nil +} + +func (fp *discovery) loadGroup(tenant, cluster, group string) (*config.Group, bool) { + bingo, err := fp.loadCluster(tenant, cluster) + if err != nil { return nil, false } for _, it := range bingo.Groups { @@ -475,9 +587,14 @@ func (fp *discovery) loadGroup(cluster, group string) (*config.Group, bool) { return nil, false } -func (fp *discovery) loadTables(cfg *config.Configuration, cluster string) map[string]*config.Table { +func (fp *discovery) loadTables(cluster string, op config.Center) map[string]*config.Table { + cfg, err := op.Load(context.Background()) + if err != nil { + return nil + } + var tables map[string]*config.Table - for _, it := range cfg.Data.ShardingRule.Tables { + for _, it := range cfg.ShardingRule.Tables { db, tb, err := parseTable(it.Name) if err != nil { log.Warnf("skip parsing table rule: %v", err) @@ -594,8 +711,9 @@ func parseTable(input string) (db, tbl string, err error) { return } -func NewProvider(path string) Discovery { +func NewDiscovery(path string) Discovery { return &discovery{ - path: path, + path: path, + centers: map[string]config.Center{}, } } diff --git a/pkg/boot/discovery_import.go b/pkg/boot/discovery_import.go new file mode 100644 index 00000000..e00b5b31 --- /dev/null +++ b/pkg/boot/discovery_import.go @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
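With the provider renamed to NewDiscovery and every query now scoped by tenant, a typical read path over the exported API looks roughly as follows. This is only a sketch: the bootstrap path is an assumption and error handling is reduced to panics.

package main

import (
	"context"
	"fmt"
)

import (
	"github.com/arana-db/arana/pkg/boot"
)

func main() {
	ctx := context.Background()

	// The bootstrap path here is illustrative.
	disc := boot.NewDiscovery("conf/bootstrap.yaml")
	if err := disc.Init(ctx); err != nil {
		panic(err)
	}

	tenants, err := disc.ListTenants(ctx)
	if err != nil {
		panic(err)
	}
	for _, tenant := range tenants {
		// Every lookup below now carries the tenant name.
		clusters, err := disc.ListClusters(ctx, tenant)
		if err != nil {
			panic(err)
		}
		fmt.Printf("tenant=%s clusters=%v\n", tenant, clusters)
	}
}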
+ * + */ + +package boot + +import ( + "context" +) + +import ( + "github.com/arana-db/arana/pkg/config" + "github.com/arana-db/arana/pkg/util/log" +) + +func RunImport(importConfPath, configPath string) bool { + bootCfg, err := LoadBootOptions(importConfPath) + if err != nil { + log.Fatalf("load bootstrap config failed: %+v", err) + } + + if err := config.Init(*bootCfg.Config, bootCfg.APIVersion); err != nil { + log.Fatalf("init config failed: %+v", err) + } + + cfg, err := config.Load(configPath) + if err != nil { + log.Fatal("load config from %s failed: %+v", configPath, err) + return false + } + + tenantOp, err := config.NewTenantOperator(config.GetStoreOperate()) + if err != nil { + log.Fatal("build tenant operator failed: %+v", err) + return false + } + + defer tenantOp.Close() + + for i := range cfg.Data.Tenants { + if err := tenantOp.CreateTenant(cfg.Data.Tenants[i].Name); err != nil { + log.Fatal("create tenant failed: %+v", err) + return false + } + } + + for i := range cfg.Data.Tenants { + + tenant := cfg.Data.Tenants[i] + + tenant.APIVersion = cfg.APIVersion + tenant.Metadata = cfg.Metadata + + ok := func() bool { + op := config.NewCenter(tenant.Name, config.GetStoreOperate()) + defer op.Close() + + if err := op.Import(context.Background(), tenant); err != nil { + log.Fatalf("persist config to config.store failed: %+v", err) + return false + } + + return true + }() + + if !ok { + return false + } + } + + log.Infof("finish import config into config_center") + return true +} diff --git a/pkg/boot/discovery_test.go b/pkg/boot/discovery_test.go index a618eb15..fda85294 100644 --- a/pkg/boot/discovery_test.go +++ b/pkg/boot/discovery_test.go @@ -19,6 +19,7 @@ package boot import ( "context" + "os" "testing" ) @@ -27,39 +28,41 @@ import ( ) import ( + "github.com/arana-db/arana/pkg/constants" "github.com/arana-db/arana/testdata" ) func TestFileProvider(t *testing.T) { - provider := NewProvider(testdata.Path("fake_bootstrap.yaml")) + os.Setenv(constants.EnvConfigPath, testdata.Path("fake_config.yaml")) + provider := NewDiscovery(testdata.Path("fake_bootstrap.yaml")) err := Boot(context.Background(), provider) assert.NoError(t, err, "should init ok") - clusters, err := provider.ListClusters(context.Background()) + clusters, err := provider.ListClusters(context.Background(), "arana") assert.NoError(t, err) assert.NotEmpty(t, clusters, "clusters should not be empty") t.Logf("clusters: %v\n", clusters) - groups, err := provider.ListGroups(context.Background(), clusters[0]) + groups, err := provider.ListGroups(context.Background(), "arana", clusters[0]) assert.NoError(t, err) assert.NotEmpty(t, groups, "groups should not be empty") t.Logf("groups: %v\n", groups) - nodes, err := provider.ListNodes(context.Background(), clusters[0], groups[0]) + nodes, err := provider.ListNodes(context.Background(), "arana", clusters[0], groups[0]) assert.NoError(t, err) assert.NotEmpty(t, nodes, "nodes should not be empty") - node, err := provider.GetNode(context.Background(), clusters[0], groups[0], nodes[0]) + node, err := provider.GetNode(context.Background(), "arana", clusters[0], groups[0], nodes[0]) assert.NoError(t, err) t.Logf("node: %s\n", node) - tables, err := provider.ListTables(context.Background(), clusters[0]) + tables, err := provider.ListTables(context.Background(), "arana", clusters[0]) assert.NoError(t, err) assert.NotEmpty(t, tables, "tables should not be empty") t.Logf("tables: %v\n", tables) - table, err := provider.GetTable(context.Background(), clusters[0], tables[0]) + table, err :=
provider.GetTable(context.Background(), "arana", clusters[0], tables[0]) assert.NoError(t, err) assert.True(t, table.AllowFullScan()) t.Logf("vtable: %v\n", table) diff --git a/pkg/boot/discovery_watch.go b/pkg/boot/discovery_watch.go new file mode 100644 index 00000000..d54a5c50 --- /dev/null +++ b/pkg/boot/discovery_watch.go @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package boot + +import ( + "context" + "time" +) + +import ( + "github.com/arana-db/arana/pkg/config" +) + +func (fp *discovery) WatchTenants(ctx context.Context) (<-chan config.TenantsEvent, context.CancelFunc, error) { + ch := make(chan config.TenantsEvent) + + cancel := fp.tenantOp.Subscribe(ctx, func(e config.Event) { + ch <- e.(config.TenantsEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func (fp *discovery) WatchNodes(ctx context.Context, tenant string) (<-chan config.NodesEvent, context.CancelFunc, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, nil, ErrorNoTenant + } + + ch := make(chan config.NodesEvent) + + cancel := op.Subscribe(ctx, config.EventTypeNodes, func(e config.Event) { + ch <- e.(config.NodesEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func (fp *discovery) WatchUsers(ctx context.Context, tenant string) (<-chan config.UsersEvent, context.CancelFunc, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, nil, ErrorNoTenant + } + + ch := make(chan config.UsersEvent) + + cancel := op.Subscribe(ctx, config.EventTypeUsers, func(e config.Event) { + ch <- e.(config.UsersEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func (fp *discovery) WatchClusters(ctx context.Context, tenant string) (<-chan config.ClustersEvent, context.CancelFunc, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, nil, ErrorNoTenant + } + + ch := make(chan config.ClustersEvent) + + cancel := op.Subscribe(ctx, config.EventTypeClusters, func(e config.Event) { + ch <- e.(config.ClustersEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func (fp *discovery) WatchShardingRule(ctx context.Context, tenant string) (<-chan config.ShardingRuleEvent, context.CancelFunc, error) { + op, ok := fp.centers[tenant] + if !ok { + return nil, nil, ErrorNoTenant + } + + ch := make(chan config.ShardingRuleEvent) + + cancel := op.Subscribe(ctx, config.EventTypeShardingRule, func(e config.Event) { + ch <- e.(config.ShardingRuleEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func (fp *discovery) WatchShadowRule(ctx context.Context, tenant string) (<-chan config.ShadowRuleEvent, context.CancelFunc, error) { + op, ok := fp.centers[tenant] + if !ok { + 
return nil, nil, ErrorNoTenant + } + + ch := make(chan config.ShadowRuleEvent) + + cancel := op.Subscribe(ctx, config.EventTypeShadowRule, func(e config.Event) { + ch <- e.(config.ShadowRuleEvent) + }) + + return ch, wrapWatchCancel(cancel, func() { + close(ch) + }), nil +} + +func wrapWatchCancel(cancel context.CancelFunc, closeChan func()) context.CancelFunc { + return func() { + timer := time.NewTimer(100 * time.Millisecond) + defer timer.Stop() + cancel() + <-timer.C + closeChan() + } +} diff --git a/pkg/boot/options.go b/pkg/boot/options.go index 6bcf8f09..5437b218 100644 --- a/pkg/boot/options.go +++ b/pkg/boot/options.go @@ -21,6 +21,10 @@ import ( "github.com/arana-db/arana/pkg/config" ) -type BootOptions struct { - Config *config.ConfigOptions `yaml:"config"` -} +type ( + BootOptions struct { + config.Spec `yaml:",inline"` + Config *config.Options `yaml:"config"` + Listeners []*config.Listener `validate:"required,dive" yaml:"listeners" json:"listeners"` + } +) diff --git a/pkg/boot/proto.go b/pkg/boot/proto.go new file mode 100644 index 00000000..38fc2e7d --- /dev/null +++ b/pkg/boot/proto.go @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
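The watch helpers above are surfaced through the ConfigWatcher interface declared later in this patch (pkg/boot/proto.go). A hedged sketch of consuming one of the channels and releasing it; the tenant name and the done channel are assumptions, and error handling is trimmed.

package example

import (
	"context"
	"fmt"
)

import (
	"github.com/arana-db/arana/pkg/boot"
)

// watchClusters subscribes to cluster changes for one tenant and stops when done is closed.
func watchClusters(disc boot.Discovery, done <-chan struct{}) error {
	// The file-based discovery also implements ConfigWatcher, so a type assertion works here.
	watcher, ok := disc.(boot.ConfigWatcher)
	if !ok {
		return fmt.Errorf("discovery does not support watching")
	}

	ch, cancel, err := watcher.WatchClusters(context.Background(), "arana")
	if err != nil {
		return err
	}

	go func() {
		for event := range ch { // cancel() below also closes ch via wrapWatchCancel
			fmt.Printf("clusters changed: %#v\n", event)
		}
	}()

	<-done
	cancel()
	return nil
}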
+ */ + +package boot + +import ( + "context" +) + +import ( + "github.com/arana-db/arana/pkg/config" + "github.com/arana-db/arana/pkg/proto/rule" +) + +type Cluster struct { + Tenant string `yaml:"tenant" json:"tenant"` + Type config.DataSourceType `yaml:"type" json:"type"` +} + +type GroupBody struct { + Nodes []string `yaml:"nodes" json:"nodes"` +} + +type ClusterBody struct { + Type config.DataSourceType `yaml:"type" json:"type"` + SqlMaxLimit int `yaml:"sql_max_limit" json:"sql_max_limit,omitempty"` + Parameters config.ParametersMap `yaml:"parameters" json:"parameters,omitempty"` +} + +type NodeBody struct { + Host string `yaml:"host" json:"host"` + Port int `yaml:"port" json:"port"` + Username string `yaml:"username" json:"username"` + Password string `yaml:"password" json:"password"` + Database string `yaml:"database" json:"database"` + Weight string `yaml:"weight" json:"weight"` + Parameters config.ParametersMap `yaml:"parameters" json:"parameters,omitempty"` + ConnProps map[string]interface{} `yaml:"conn_props" json:"conn_props,omitempty"` + Labels map[string]string `yaml:"labels" json:"labels,omitempty"` +} + +type TenantBody struct { + Users []*config.User `yaml:"users" json:"users"` +} + +type TableBody struct { + Sequence *config.Sequence `yaml:"sequence" json:"sequence"` + AllowFullScan bool `yaml:"allow_full_scan" json:"allow_full_scan,omitempty"` + DbRules []*config.Rule `yaml:"db_rules" json:"db_rules"` + TblRules []*config.Rule `yaml:"tbl_rules" json:"tbl_rules"` + Topology *config.Topology `yaml:"topology" json:"topology"` + ShadowTopology *config.Topology `yaml:"shadow_topology" json:"shadow_topology"` + Attributes map[string]string `yaml:"attributes" json:"attributes"` +} + +// ConfigProvider provides configurations. +type ConfigProvider interface { + ConfigUpdater + + // ListTenants list tenants name + ListTenants(ctx context.Context) ([]string, error) + + // GetTenant returns the tenant info + GetTenant(ctx context.Context, tenant string) (*config.Tenant, error) + + // ListUsers returns the user list + ListUsers(ctx context.Context, tenant string) (config.Users, error) + + // ListClusters lists the cluster names. + ListClusters(ctx context.Context, tenant string) ([]string, error) + + // GetDataSourceCluster returns the dataSourceCluster object + GetDataSourceCluster(ctx context.Context, tenant, cluster string) (*config.DataSourceCluster, error) + + // GetGroup returns the cluster info + GetGroup(ctx context.Context, tenant, cluster, group string) (*config.Group, error) + + // GetCluster returns the cluster info + GetCluster(ctx context.Context, tenant, cluster string) (*Cluster, error) + + // ListGroups lists the group names. + ListGroups(ctx context.Context, tenant, cluster string) ([]string, error) + + // ListNodes lists the node names. + ListNodes(ctx context.Context, tenant, cluster, group string) ([]string, error) + + // GetNode returns the node info. + GetNode(ctx context.Context, tenant, cluster, group, node string) (*config.Node, error) + + // ListTables lists the table names. + ListTables(ctx context.Context, tenant, cluster string) ([]string, error) + + // GetTable returns the table info. + GetTable(ctx context.Context, tenant, cluster, table string) (*rule.VTable, error) + + // Import import config into config_center + Import(ctx context.Context, info *config.Tenant) error +} + +// ConfigUpdater represents the mutations of configurations. 
+// The configuration is designed for structure storage, here is a example in tree-view: +// ── tenants +// ├── google +// │ ├── clusters: [mysql-instance-a,...] +// │ │ ├── employees +// │ │ │ ├── groups +// │ │ │ │ ├── employees_0000 +// │ │ │ │ ├── ... +// │ │ │ │ └── employees_0007 +// │ │ │ └── tables +// │ │ │ ├── employee +// │ │ │ ├── salary +// │ │ │ └── tax +// │ │ └── products +// │ │ └── groups +// │ │ ├── products_0000 +// │ │ ├── ... +// │ │ └── products_0007 +// │ └── nodes +// │ ├── mysql-instance-a +// │ ├── ... +// │ └── mysql-instance-x +// └── apple +// ├── ... +// └── ... +type ConfigUpdater interface { + // UpsertTenant upserts a tenant. + UpsertTenant(ctx context.Context, tenant string, body *TenantBody) error + + // RemoveTenant removes a tenant. + RemoveTenant(ctx context.Context, tenant string) error + + // UpsertCluster upserts a cluster into an existing tenant. + UpsertCluster(ctx context.Context, tenant, cluster string, body *ClusterBody) error + + // RemoveCluster removes a cluster from an existing tenant. + RemoveCluster(ctx context.Context, tenant, cluster string) error + + // UpsertNode upserts a physical node. + UpsertNode(ctx context.Context, tenant, node string, body *NodeBody) error + + // RemoveNode removes a physical node. + RemoveNode(ctx context.Context, tenant, node string) error + + // UpsertGroup upserts a group into an existing cluster. + UpsertGroup(ctx context.Context, tenant, cluster, group string, body *GroupBody) error + + // RemoveGroup removes a group from an existing cluster. + RemoveGroup(ctx context.Context, tenant, cluster, group string) error + + // BindNode binds a node into an existing cluster group. + BindNode(ctx context.Context, tenant, cluster, group, node string) error + + // UnbindNode unbinds a node from an existing cluster group. + UnbindNode(ctx context.Context, tenant, cluster, group, node string) error + + // UpsertTable upserts a new sharding table rule into a cluster. + UpsertTable(ctx context.Context, tenant, cluster, table string, body *TableBody) error + + // RemoveTable removes a sharding table config from an existing cluster. 
+ RemoveTable(ctx context.Context, tenant, cluster, table string) error +} + +// ConfigWatcher listens for changes in related configuration +type ConfigWatcher interface { + // WatchTenants watches tenant change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchTenants(ctx context.Context) (<-chan config.TenantsEvent, context.CancelFunc, error) + // WatchNodes watches nodes change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchNodes(ctx context.Context, tenant string) (<-chan config.NodesEvent, context.CancelFunc, error) + // WatchUsers watches users change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchUsers(ctx context.Context, tenant string) (<-chan config.UsersEvent, context.CancelFunc, error) + // WatchClusters watches cluster change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchClusters(ctx context.Context, tenant string) (<-chan config.ClustersEvent, context.CancelFunc, error) + // WatchShardingRule watches sharding rule change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchShardingRule(ctx context.Context, tenant string) (<-chan config.ShardingRuleEvent, context.CancelFunc, error) + // WatchShadowRule watches shadow rule change + // return <-chan config.TenantsEvent: listen to this chan to get related event + // return context.CancelFunc: used to cancel this monitoring, after execution, chan(<-chan config.TenantsEvent) will be closed + WatchShadowRule(ctx context.Context, tenant string) (<-chan config.ShadowRuleEvent, context.CancelFunc, error) +} + +type Discovery interface { + ConfigProvider + // ListListeners lists the listener names + ListListeners(ctx context.Context) []*config.Listener + + // Init initializes discovery with context + Init(ctx context.Context) error +} diff --git a/pkg/config/api.go b/pkg/config/api.go index 5fdfa60b..d23a336d 100644 --- a/pkg/config/api.go +++ b/pkg/config/api.go @@ -18,9 +18,11 @@ package config import ( - "errors" + "context" "fmt" "io" + "path/filepath" + "sync" ) type ( @@ -32,14 +34,25 @@ type ( ) const ( - DefaultConfigPath PathKey = "/arana-db/config" - DefaultConfigMetadataPath PathKey = "/arana-db/config/metadata" - DefaultConfigDataListenersPath PathKey = "/arana-db/config/data/listeners" - DefaultConfigDataSourceClustersPath PathKey = "/arana-db/config/data/dataSourceClusters" - DefaultConfigDataShardingRulePath PathKey = "/arana-db/config/data/shardingRule" - DefaultConfigDataTenantsPath PathKey = "/arana-db/config/data/tenants" + _rootPathTemp = "/%s/%s/" ) +var ( + DefaultRootPath PathKey + DefaultTenantsPath PathKey +) + +func initPath(root, version string) { + if root == "" { + root = "arana-db" + } + if version == "" { + version = "1.0" + } + DefaultRootPath = PathKey(fmt.Sprintf(_rootPathTemp, 
root, version)) + DefaultTenantsPath = PathKey(filepath.Join(string(DefaultRootPath), "tenants")) +} + const ( Http ProtocolType = iota MySQL @@ -52,54 +65,81 @@ const ( ) var ( - slots = make(map[string]StoreOperate) - storeOperate StoreOperate -) - -func GetStoreOperate() (StoreOperate, error) { - if storeOperate != nil { - return storeOperate, nil - } - - return nil, errors.New("StoreOperate not init") -} - -func Init(name string, options map[string]interface{}) error { - s, exist := slots[name] - if !exist { - return fmt.Errorf("StoreOperate solt=[%s] not exist", name) - } + slots = make(map[string]StoreOperator) - storeOperate = s + storeOperate StoreOperator - return storeOperate.Init(options) -} + once sync.Once +) // Register register store plugin -func Register(s StoreOperate) { +func Register(s StoreOperator) error { if _, ok := slots[s.Name()]; ok { - panic(fmt.Errorf("StoreOperate=[%s] already exist", s.Name())) + return fmt.Errorf("StoreOperator=[%s] already exist", s.Name()) } slots[s.Name()] = s + return nil } -// StoreOperate config storage related plugins -type StoreOperate interface { - io.Closer +type ( + callback func(e Event) - // Init plugin initialization - Init(options map[string]interface{}) error + SubscribeResult struct { + EventChan <-chan Event + Cancel context.CancelFunc + } - // Save save a configuration data - Save(key PathKey, val []byte) error + subscriber struct { + watch callback + ctx context.Context + } - // Get get a configuration - Get(key PathKey) ([]byte, error) + Options struct { + StoreName string `yaml:"name"` + RootPath string `yaml:"root_path"` + Options map[string]interface{} `yaml:"options"` + } - // Watch Monitor changes of the key - Watch(key PathKey) (<-chan []byte, error) + //TenantOperator actions specific to tenant spaces + TenantOperator interface { + io.Closer + //ListTenants lists all tenants + ListTenants() []string + //CreateTenant creates tenant + CreateTenant(string) error + //RemoveTenant removes tenant + RemoveTenant(string) error + //Subscribe subscribes tenants change + Subscribe(ctx context.Context, c callback) context.CancelFunc + } - // Name plugin name - Name() string -} + // Center Configuration center for each tenant, tenant-level isolation + Center interface { + io.Closer + // Load loads the full Tenant configuration, the first time it will be loaded remotely, + // and then it will be directly assembled from the cache layer + Load(ctx context.Context) (*Tenant, error) + // Import imports the configuration information of a tenant + Import(ctx context.Context, cfg *Tenant) error + // Subscribe subscribes to all changes of an event by EventType + Subscribe(ctx context.Context, et EventType, c callback) context.CancelFunc + // Tenant tenant info + Tenant() string + } + + // StoreOperator config storage related plugins + StoreOperator interface { + io.Closer + // Init plugin initialization + Init(options map[string]interface{}) error + // Save save a configuration data + Save(key PathKey, val []byte) error + // Get get a configuration + Get(key PathKey) ([]byte, error) + // Watch Monitor changes of the key + Watch(key PathKey) (<-chan []byte, error) + // Name plugin name + Name() string + } +) diff --git a/pkg/config/api_test.go b/pkg/config/api_test.go index b193df11..822a2fd3 100644 --- a/pkg/config/api_test.go +++ b/pkg/config/api_test.go @@ -30,46 +30,27 @@ import ( import ( "github.com/arana-db/arana/pkg/config" + _ "github.com/arana-db/arana/pkg/config/etcd" + _ "github.com/arana-db/arana/pkg/config/file" + _ 
"github.com/arana-db/arana/pkg/config/nacos" "github.com/arana-db/arana/testdata" ) -func TestGetStoreOperate(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - // mockStore := NewMockStoreOperate(ctrl) - tests := []struct { - name string - want config.StoreOperate - wantErr assert.ErrorAssertionFunc - }{ - {"GetStoreOperate_1", nil, assert.Error}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := config.GetStoreOperate() - if !tt.wantErr(t, err, fmt.Sprintf("GetStoreOperate()")) { - return - } - assert.Equalf(t, tt.want, got, "GetStoreOperate()") - }) - } -} - func TestInit(t *testing.T) { type args struct { - name string - options map[string]interface{} + version string + options config.Options } tests := []struct { name string args args wantErr assert.ErrorAssertionFunc }{ - {"Init_1", args{"file", nil}, assert.Error}, + {"Init_1", args{"file", config.Options{}}, assert.Error}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.wantErr(t, config.Init(tt.args.name, tt.args.options), fmt.Sprintf("Init(%v, %v)", tt.args.name, tt.args.options)) + tt.wantErr(t, config.Init(tt.args.options, tt.args.version), fmt.Sprintf("Init(%v, %v)", tt.args.options, tt.args.version)) }) } } @@ -80,7 +61,7 @@ func TestRegister(t *testing.T) { mockStore := testdata.NewMockStoreOperate(ctrl) mockStore.EXPECT().Name().Times(2).Return("nacos") type args struct { - s config.StoreOperate + s config.StoreOperator } tests := []struct { name string @@ -107,26 +88,27 @@ func Test_api(t *testing.T) { mockFileStore2 := testdata.NewMockStoreOperate(ctrl) mockFileStore2.EXPECT().Name().AnyTimes().Return("file") - assert.Panics(t, func() { - config.Register(mockFileStore2) - }, "StoreOperate=[file] already exist") + assert.Error(t, config.Register(mockFileStore2), "StoreOperate=[file] already exist") } func Test_Init(t *testing.T) { - options := make(map[string]interface{}, 0) + options := config.Options{ + StoreName: "fake", + RootPath: "", + Options: nil, + } ctrl := gomock.NewController(t) defer ctrl.Finish() mockFileStore := testdata.NewMockStoreOperate(ctrl) mockFileStore.EXPECT().Name().Times(2).Return("fake") - mockFileStore.EXPECT().Init(options).Return(nil) - err := config.Init("fake", options) + mockFileStore.EXPECT().Init(gomock.Any()).Return(nil) + err := config.InitStoreOperate(options) assert.Error(t, err) config.Register(mockFileStore) - err = config.Init("fake", options) + err = config.InitStoreOperate(options) assert.NoError(t, err) - store, err := config.GetStoreOperate() - assert.NoError(t, err) + store := config.GetStoreOperate() assert.NotNil(t, store) } diff --git a/pkg/config/config.go b/pkg/config/config.go index 91cb83cf..f1ea4f8b 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -23,173 +23,429 @@ package config import ( "context" "encoding/json" - "errors" "fmt" + "path/filepath" "sync" "sync/atomic" ) import ( + "github.com/pkg/errors" + "github.com/tidwall/gjson" "gopkg.in/yaml.v3" ) import ( - "github.com/arana-db/arana/pkg/util/env" "github.com/arana-db/arana/pkg/util/log" ) -var ( - ConfigKeyMapping map[PathKey]string = map[PathKey]string{ - DefaultConfigMetadataPath: "metadata", - DefaultConfigDataTenantsPath: "data.tenants", - DefaultConfigDataListenersPath: "data.listeners", - DefaultConfigDataSourceClustersPath: "data.clusters", - DefaultConfigDataShardingRulePath: "data.sharding_rule", +type PathInfo struct { + DefaultConfigSpecPath PathKey + DefaultTenantBaseConfigPath PathKey + 
DefaultConfigDataNodesPath PathKey + DefaultConfigDataUsersPath PathKey + DefaultConfigDataSourceClustersPath PathKey + DefaultConfigDataShardingRulePath PathKey + DefaultConfigDataShadowRulePath PathKey + + ConfigKeyMapping map[PathKey]string + ConfigEventMapping map[PathKey]EventType + BuildEventMapping map[EventType]func(pre, cur *Tenant) Event + ConfigValSupplier map[PathKey]func(cfg *Tenant) interface{} +} + +func NewPathInfo(tenant string) *PathInfo { + + p := &PathInfo{} + + p.DefaultTenantBaseConfigPath = PathKey(filepath.Join(string(DefaultRootPath), fmt.Sprintf("tenants/%s", tenant))) + p.DefaultConfigSpecPath = PathKey(filepath.Join(string(p.DefaultTenantBaseConfigPath), "spec")) + p.DefaultConfigDataNodesPath = PathKey(filepath.Join(string(p.DefaultTenantBaseConfigPath), "nodes")) + p.DefaultConfigDataUsersPath = PathKey(filepath.Join(string(p.DefaultTenantBaseConfigPath), "users")) + p.DefaultConfigDataSourceClustersPath = PathKey(filepath.Join(string(p.DefaultTenantBaseConfigPath), "dataSourceClusters")) + p.DefaultConfigDataShardingRulePath = PathKey(filepath.Join(string(p.DefaultConfigDataSourceClustersPath), "shardingRule")) + p.DefaultConfigDataShadowRulePath = PathKey(filepath.Join(string(p.DefaultConfigDataSourceClustersPath), "shadowRule")) + + p.ConfigEventMapping = map[PathKey]EventType{ + p.DefaultConfigDataUsersPath: EventTypeUsers, + p.DefaultConfigDataNodesPath: EventTypeNodes, + p.DefaultConfigDataSourceClustersPath: EventTypeClusters, + p.DefaultConfigDataShardingRulePath: EventTypeShardingRule, + p.DefaultConfigDataShadowRulePath: EventTypeShadowRule, } - _configValSupplier map[PathKey]func(cfg *Configuration) interface{} = map[PathKey]func(cfg *Configuration) interface{}{ - DefaultConfigMetadataPath: func(cfg *Configuration) interface{} { - return &cfg.Metadata + p.ConfigValSupplier = map[PathKey]func(cfg *Tenant) interface{}{ + p.DefaultConfigSpecPath: func(cfg *Tenant) interface{} { + return &cfg.Spec }, - DefaultConfigDataTenantsPath: func(cfg *Configuration) interface{} { - return &cfg.Data.Tenants + p.DefaultConfigDataUsersPath: func(cfg *Tenant) interface{} { + return &cfg.Users }, - DefaultConfigDataListenersPath: func(cfg *Configuration) interface{} { - return &cfg.Data.Listeners + p.DefaultConfigDataSourceClustersPath: func(cfg *Tenant) interface{} { + return &cfg.DataSourceClusters }, - DefaultConfigDataSourceClustersPath: func(cfg *Configuration) interface{} { - return &cfg.Data.DataSourceClusters + p.DefaultConfigDataNodesPath: func(cfg *Tenant) interface{} { + return &cfg.Nodes }, - DefaultConfigDataShardingRulePath: func(cfg *Configuration) interface{} { - return &cfg.Data.ShardingRule + p.DefaultConfigDataShardingRulePath: func(cfg *Tenant) interface{} { + return cfg.ShardingRule + }, + p.DefaultConfigDataShadowRulePath: func(cfg *Tenant) interface{} { + return cfg.ShadowRule }, } -) -type Changeable interface { - Name() string - Sign() string + p.ConfigKeyMapping = map[PathKey]string{ + p.DefaultConfigSpecPath: "spec", + p.DefaultConfigDataUsersPath: "users", + p.DefaultConfigDataSourceClustersPath: "clusters", + p.DefaultConfigDataShardingRulePath: "sharding_rule", + p.DefaultConfigDataNodesPath: "nodes", + p.DefaultConfigDataShadowRulePath: "shadow_rule", + } + + p.BuildEventMapping = map[EventType]func(pre *Tenant, cur *Tenant) Event{ + EventTypeNodes: func(pre, cur *Tenant) Event { + return Nodes(cur.Nodes).Diff(pre.Nodes) + }, + EventTypeUsers: func(pre, cur *Tenant) Event { + return Users(cur.Users).Diff(pre.Users) + }, + 
EventTypeClusters: func(pre, cur *Tenant) Event { + return Clusters(cur.DataSourceClusters).Diff(pre.DataSourceClusters) + }, + EventTypeShardingRule: func(pre, cur *Tenant) Event { + return cur.ShardingRule.Diff(pre.ShardingRule) + }, + EventTypeShadowRule: func(pre, cur *Tenant) Event { + return cur.ShadowRule.Diff(pre.ShadowRule) + }, + } + + return p } -type Observer func() +func NewTenantOperator(op StoreOperator) (TenantOperator, error) { + tenantOp := &tenantOperate{ + op: op, + tenants: map[string]struct{}{}, + cancels: []context.CancelFunc{}, + observers: &observerBucket{observers: map[EventType][]*subscriber{}}, + } + + if err := tenantOp.init(); err != nil { + return nil, err + } -type ConfigOptions struct { - StoreName string `yaml:"name"` - Options map[string]interface{} `yaml:"options"` + return tenantOp, nil } -type Center struct { - initialize int32 - storeOperate StoreOperate - confHolder atomic.Value // 里面持有了最新的 *Configuration 对象 - lock sync.RWMutex - observers []Observer - watchCancels []context.CancelFunc +type tenantOperate struct { + op StoreOperator + lock sync.RWMutex + + tenants map[string]struct{} + observers *observerBucket + + cancels []context.CancelFunc } -func NewCenter(options ConfigOptions) (*Center, error) { - if err := Init(options.StoreName, options.Options); err != nil { - return nil, err +func (tp *tenantOperate) Subscribe(ctx context.Context, c callback) context.CancelFunc { + return tp.observers.add(EventTypeTenants, c) +} + +func (tp *tenantOperate) init() error { + tp.lock.Lock() + defer tp.lock.Unlock() + + if len(tp.tenants) == 0 { + val, err := tp.op.Get(DefaultTenantsPath) + if err != nil { + return err + } + + tenants := make([]string, 0, 4) + if err := yaml.Unmarshal(val, &tenants); err != nil { + return err + } + + for i := range tenants { + tp.tenants[tenants[i]] = struct{}{} + } } - operate, err := GetStoreOperate() + ctx, cancel := context.WithCancel(context.Background()) + tp.cancels = append(tp.cancels, cancel) + + return tp.watchTenants(ctx) +} + +func (tp *tenantOperate) watchTenants(ctx context.Context) error { + ch, err := tp.op.Watch(DefaultTenantsPath) if err != nil { - return nil, err + return err } - return &Center{ - confHolder: atomic.Value{}, - lock: sync.RWMutex{}, - storeOperate: operate, - observers: make([]Observer, 0, 2), - }, nil + go func(ctx context.Context) { + consumer := func(ret []byte) { + tenants := make([]string, 0, 4) + if err := yaml.Unmarshal(ret, &tenants); err != nil { + log.Errorf("marshal tenants content : %v", err) + return + } + + event := Tenants(tenants).Diff(tp.ListTenants()) + log.Infof("receive tenants change event : %#v", event) + tp.observers.notify(EventTypeTenants, event) + + tp.lock.Lock() + defer tp.lock.Unlock() + + tp.tenants = map[string]struct{}{} + for i := range tenants { + tp.tenants[tenants[i]] = struct{}{} + } + } + + for { + select { + case ret := <-ch: + consumer(ret) + case <-ctx.Done(): + log.Infof("stop watch : %s", DefaultTenantsPath) + } + } + }(ctx) + + return nil } -func (c *Center) Close() error { - if err := c.storeOperate.Close(); err != nil { +func (tp *tenantOperate) ListTenants() []string { + tp.lock.RLock() + defer tp.lock.RUnlock() + + ret := make([]string, 0, len(tp.tenants)) + + for i := range tp.tenants { + ret = append(ret, i) + } + + return ret +} + +func (tp *tenantOperate) CreateTenant(name string) error { + tp.lock.Lock() + defer tp.lock.Unlock() + + if _, ok := tp.tenants[name]; ok { + return nil + } + + tp.tenants[name] = struct{}{} + ret := make([]string, 
0, len(tp.tenants)) + for i := range tp.tenants { + ret = append(ret, i) + } + + data, err := yaml.Marshal(ret) + if err != nil { return err } - for i := range c.watchCancels { - c.watchCancels[i]() + if err := tp.op.Save(DefaultTenantsPath, data); err != nil { + return errors.Wrap(err, "create tenant name") + } + + //need to insert the relevant configuration data under the relevant tenant + tenantPathInfo := NewPathInfo(name) + for i := range tenantPathInfo.ConfigKeyMapping { + if err := tp.op.Save(i, []byte("")); err != nil { + return errors.Wrap(err, fmt.Sprintf("create tenant resource : %s", i)) + } } return nil } -func (c *Center) Load() (*Configuration, error) { - return c.LoadContext(context.Background()) +func (tp *tenantOperate) RemoveTenant(name string) error { + tp.lock.Lock() + defer tp.lock.Unlock() + + delete(tp.tenants, name) + + ret := make([]string, 0, len(tp.tenants)) + for i := range tp.tenants { + ret = append(ret, i) + } + + data, err := yaml.Marshal(ret) + if err != nil { + return err + } + + return tp.op.Save(DefaultTenantsPath, data) } -func (c *Center) LoadContext(ctx context.Context) (*Configuration, error) { - val := c.confHolder.Load() - if val == nil { - cfg, err := c.loadFromStore(ctx) - if err != nil { +func (tp *tenantOperate) Close() error { + for i := range tp.cancels { + tp.cancels[i]() + } + return nil +} + +type observerBucket struct { + lock sync.RWMutex + observers map[EventType][]*subscriber +} + +func (b *observerBucket) notify(et EventType, val Event) { + b.lock.RLock() + defer b.lock.RUnlock() + + v := b.observers[et] + for i := range v { + item := v[i] + select { + case <-item.ctx.Done(): + default: + item.watch(val) + } + } +} + +func (b *observerBucket) add(et EventType, f callback) context.CancelFunc { + b.lock.Lock() + defer b.lock.Unlock() + + if _, ok := b.observers[et]; !ok { + b.observers[et] = make([]*subscriber, 0, 4) + } + + ctx, cancel := context.WithCancel(context.Background()) + + v := b.observers[et] + v = append(v, &subscriber{ + watch: f, + ctx: ctx, + }) + + b.observers[et] = v + + return cancel +} + +type center struct { + tenant string + initialize int32 + + storeOperate StoreOperator + pathInfo *PathInfo + holders map[PathKey]*atomic.Value + + observers *observerBucket + watchCancels []context.CancelFunc +} + +func NewCenter(tenant string, op StoreOperator) Center { + + p := NewPathInfo(tenant) + + holders := map[PathKey]*atomic.Value{} + for k := range p.ConfigKeyMapping { + holders[k] = &atomic.Value{} + holders[k].Store(NewEmptyTenant()) + } + + return ¢er{ + pathInfo: p, + tenant: tenant, + holders: holders, + storeOperate: op, + observers: &observerBucket{observers: map[EventType][]*subscriber{}}, + } +} + +func (c *center) Close() error { + for i := range c.watchCancels { + c.watchCancels[i]() + } + return nil +} + +func (c *center) Load(ctx context.Context) (*Tenant, error) { + if atomic.CompareAndSwapInt32(&c.initialize, 0, 1) { + if err := c.loadFromStore(ctx); err != nil { return nil, err } - c.confHolder.Store(cfg) - out, _ := yaml.Marshal(cfg) - if env.IsDevelopEnvironment() { - log.Infof("load configuration:\n%s", string(out)) + if err := c.watchFromStore(); err != nil { + return nil, err } } - val = c.confHolder.Load() + return c.compositeConfiguration(), nil +} + +func (c *center) compositeConfiguration() *Tenant { + conf := &Tenant{} - return val.(*Configuration), nil + if val := c.holders[c.pathInfo.DefaultConfigDataUsersPath].Load(); val != nil { + conf.Users = val.(*Tenant).Users + } + if val := 
c.holders[c.pathInfo.DefaultConfigDataNodesPath].Load(); val != nil { + conf.Nodes = val.(*Tenant).Nodes + } + if val := c.holders[c.pathInfo.DefaultConfigDataSourceClustersPath].Load(); val != nil { + conf.DataSourceClusters = val.(*Tenant).DataSourceClusters + } + if val := c.holders[c.pathInfo.DefaultConfigDataShardingRulePath].Load(); val != nil { + conf.ShardingRule = val.(*Tenant).ShardingRule + } + if val := c.holders[c.pathInfo.DefaultConfigDataShadowRulePath].Load(); val != nil { + conf.ShadowRule = val.(*Tenant).ShadowRule + } + + if conf.Empty() { + return nil + } + return conf } -func (c *Center) ImportConfiguration(cfg *Configuration) error { - c.confHolder.Store(cfg) - return c.Persist() +func (c *center) Import(ctx context.Context, cfg *Tenant) error { + return c.doPersist(ctx, cfg) } -func (c *Center) loadFromStore(ctx context.Context) (*Configuration, error) { +func (c *center) loadFromStore(ctx context.Context) error { operate := c.storeOperate - cfg := &Configuration{ - Metadata: make(map[string]interface{}), - Data: &Data{ - Listeners: make([]*Listener, 0), - Tenants: make([]*Tenant, 0), - DataSourceClusters: make([]*DataSourceCluster, 0), - ShardingRule: &ShardingRule{}, - }, - } - - for k := range ConfigKeyMapping { + for k := range c.pathInfo.ConfigKeyMapping { val, err := operate.Get(k) if err != nil { - return nil, err + return err } - supplier, ok := _configValSupplier[k] - + holder := c.holders[k] + supplier, ok := c.pathInfo.ConfigValSupplier[k] if !ok { - return nil, fmt.Errorf("%s not register val supplier", k) + return fmt.Errorf("%s not register val supplier", k) } if len(val) != 0 { - if err := json.Unmarshal(val, supplier(cfg)); err != nil { - return nil, err + exp := supplier(holder.Load().(*Tenant)) + if err := yaml.Unmarshal(val, exp); err != nil { + return err } } } - return cfg, nil + return nil } -func (c *Center) watchFromStore() error { - if !atomic.CompareAndSwapInt32(&c.initialize, 0, 1) { - return nil - } - - cancels := make([]context.CancelFunc, 0, len(ConfigKeyMapping)) +func (c *center) watchFromStore() error { + cancels := make([]context.CancelFunc, 0, len(c.pathInfo.ConfigKeyMapping)) - for k := range ConfigKeyMapping { + for k := range c.pathInfo.ConfigKeyMapping { ctx, cancel := context.WithCancel(context.Background()) cancels = append(cancels, cancel) ch, err := c.storeOperate.Watch(k) @@ -203,26 +459,31 @@ func (c *Center) watchFromStore() error { return nil } -func (c *Center) watchKey(ctx context.Context, key PathKey, ch <-chan []byte) { +func (c *center) watchKey(ctx context.Context, key PathKey, ch <-chan []byte) { consumer := func(ret []byte) { - c.lock.Lock() - defer c.lock.Unlock() - - supplier, ok := _configValSupplier[key] + supplier, ok := c.pathInfo.ConfigValSupplier[key] if !ok { log.Errorf("%s not register val supplier", key) return } + if len(ret) == 0 { + log.Errorf("%s receive empty content, ignore", key) + return + } - cfg := c.confHolder.Load().(*Configuration) - - if len(ret) != 0 { - if err := json.Unmarshal(ret, supplier(cfg)); err != nil { - log.Errorf("", err) - } + cur := NewEmptyTenant() + if err := yaml.Unmarshal(ret, supplier(cur)); err != nil { + log.Errorf("%s marshal new content : %v", key, err) + return } - c.confHolder.Store(cfg) + pre := c.holders[key].Load().(*Tenant) + et := c.pathInfo.ConfigEventMapping[key] + event := c.pathInfo.BuildEventMapping[et](pre, cur) + log.Infof("%s receive change event : %#v", key, event) + + c.observers.notify(et, event) + c.holders[key].Store(cur) } for { @@ -235,27 
+496,54 @@ func (c *Center) watchKey(ctx context.Context, key PathKey, ch <-chan []byte) { } } -func (c *Center) Persist() error { - return c.PersistContext(context.Background()) +func (c *center) PersistContext(ctx context.Context) error { + return c.doPersist(ctx, c.compositeConfiguration()) } -func (c *Center) PersistContext(ctx context.Context) error { - val := c.confHolder.Load() - if val == nil { - return errors.New("ConfHolder.load is nil") - } - - conf := val.(*Configuration) +func (c *center) doPersist(ctx context.Context, conf *Tenant) error { configJson, err := json.Marshal(conf) if err != nil { return fmt.Errorf("config json.marshal failed %v err:", err) } - for k, v := range ConfigKeyMapping { - if err := c.storeOperate.Save(k, []byte(gjson.GetBytes(configJson, v).String())); err != nil { + for k, v := range c.pathInfo.ConfigKeyMapping { + + ret, err := JSONToYAML(gjson.GetBytes(configJson, v).String()) + if err != nil { + return err + } + + if err := c.storeOperate.Save(k, ret); err != nil { return err } } return nil } + +//Subscribe +func (c *center) Subscribe(ctx context.Context, et EventType, f callback) context.CancelFunc { + + return c.observers.add(et, f) +} + +func (c *center) Tenant() string { + return c.tenant +} + +func JSONToYAML(j string) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal([]byte(j), &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} diff --git a/pkg/config/default.go b/pkg/config/default.go new file mode 100644 index 00000000..9e84f652 --- /dev/null +++ b/pkg/config/default.go @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package config + +import ( + "github.com/pkg/errors" +) + +import ( + "github.com/arana-db/arana/pkg/util/log" +) + +var ( + ErrorNoStoreOperate = errors.New("no store operate") +) + +func GetStoreOperate() StoreOperator { + return storeOperate +} + +func Init(options Options, version string) error { + initPath(options.RootPath, version) + + var err error + once.Do(func() { + err = InitStoreOperate(options) + }) + return err +} + +func InitStoreOperate(options Options) error { + op, ok := slots[options.StoreName] + if !ok { + return ErrorNoStoreOperate + } + if err := op.Init(options.Options); err != nil { + return err + } + log.Infof("[StoreOperate] use plugin : %s", options.StoreName) + storeOperate = op + return nil +} diff --git a/pkg/config/diff.go b/pkg/config/diff.go new file mode 100644 index 00000000..fb47ad39 --- /dev/null +++ b/pkg/config/diff.go @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package config + +import ( + "reflect" +) + +func (t Tenants) Diff(old Tenants) *TenantsEvent { + addTenants := make([]string, 0, 4) + deleteTenants := make([]string, 0, 4) + + newTmp := map[string]struct{}{} + oldTmp := map[string]struct{}{} + + for i := range t { + newTmp[t[i]] = struct{}{} + } + + for i := range old { + oldTmp[old[i]] = struct{}{} + } + + for i := range newTmp { + if _, ok := oldTmp[i]; !ok { + addTenants = append(addTenants, i) + } + } + + for i := range oldTmp { + if _, ok := newTmp[i]; !ok { + deleteTenants = append(deleteTenants, i) + } + } + + return &TenantsEvent{ + AddTenants: addTenants, + DeleteTenants: deleteTenants, + } +} + +func (n Nodes) Diff(old Nodes) *NodesEvent { + addNodes := make([]*Node, 0, 4) + updateNodes := make([]*Node, 0, 4) + deleteNodes := make([]*Node, 0, 4) + + for i := range n { + if _, ok := old[i]; !ok { + addNodes = append(addNodes, n[i]) + } + } + + for i := range old { + val, ok := n[old[i].Name] + if !ok { + deleteNodes = append(deleteNodes, old[i]) + continue + } + + if !val.Equals(old[i]) { + updateNodes = append(updateNodes, val) + continue + } + } + + return &NodesEvent{ + AddNodes: addNodes, + UpdateNodes: updateNodes, + DeleteNodes: deleteNodes, + } +} + +func (u Users) Diff(old Users) *UsersEvent { + addUsers := make([]*User, 0, 4) + updateUsers := make([]*User, 0, 4) + deleteUsers := make([]*User, 0, 4) + + newTmp := map[string]*User{} + oldTmp := map[string]*User{} + + for i := range u { + newTmp[u[i].Username] = u[i] + } + + for i := range old { + oldTmp[old[i].Username] = old[i] + } + + for i := range newTmp { + if _, ok := oldTmp[i]; !ok { + addUsers = append(addUsers, newTmp[i]) + } + } + + for i := range oldTmp { + val, ok := newTmp[oldTmp[i].Username] + if !ok { + deleteUsers = append(deleteUsers, oldTmp[i]) + continue + } + + if 
!val.Equals(oldTmp[i]) { + updateUsers = append(updateUsers, val) + continue + } + } + + return &UsersEvent{ + AddUsers: addUsers, + UpdateUsers: updateUsers, + DeleteUsers: deleteUsers, + } +} + +func (c Clusters) Diff(old Clusters) *ClustersEvent { + addClusters := make([]*DataSourceCluster, 0, 4) + updateClusters := make([]*ClusterEvent, 0, 4) + deleteClusters := make([]*DataSourceCluster, 0, 4) + + newTmp := map[string]*DataSourceCluster{} + oldTmp := map[string]*DataSourceCluster{} + + for i := range c { + newTmp[c[i].Name] = c[i] + } + + for i := range old { + oldTmp[old[i].Name] = old[i] + } + + for i := range c { + if _, ok := oldTmp[c[i].Name]; !ok { + addClusters = append(addClusters, c[i]) + } + } + + for i := range old { + val, ok := newTmp[old[i].Name] + if !ok { + deleteClusters = append(deleteClusters, old[i]) + continue + } + + if !reflect.DeepEqual(val, old[i]) { + updateClusters = append(updateClusters, val.Diff(old[i])) + continue + } + } + + return &ClustersEvent{ + AddCluster: addClusters, + UpdateCluster: updateClusters, + DeleteCluster: deleteClusters, + } +} + +func (d *DataSourceCluster) Diff(old *DataSourceCluster) *ClusterEvent { + + ret := &ClusterEvent{ + Name: d.Name, + Type: d.Type, + SqlMaxLimit: d.SqlMaxLimit, + Parameters: d.Parameters, + GroupsEvent: Groups(d.Groups).Diff(old.Groups), + } + + return ret +} + +func (g Groups) Diff(old Groups) *GroupsEvent { + addGroups := make([]*Group, 0, 4) + updateGroups := make([]*Group, 0, 4) + deleteGroups := make([]*Group, 0, 4) + + newTmp := map[string]*Group{} + oldTmp := map[string]*Group{} + + for i := range g { + newTmp[g[i].Name] = g[i] + } + for i := range old { + oldTmp[old[i].Name] = old[i] + } + + for i := range g { + if _, ok := oldTmp[g[i].Name]; !ok { + addGroups = append(addGroups, g[i]) + } + } + + for i := range old { + val, ok := newTmp[old[i].Name] + if !ok { + deleteGroups = append(deleteGroups, old[i]) + continue + } + + if !reflect.DeepEqual(val, old[i]) { + updateGroups = append(updateGroups, val) + continue + } + } + + return &GroupsEvent{ + AddGroups: addGroups, + DeleteGroups: deleteGroups, + UpdateGroups: updateGroups, + } +} + +func (s *ShardingRule) Diff(old *ShardingRule) *ShardingRuleEvent { + addTables := make([]*Table, 0, 4) + updateTables := make([]*Table, 0, 4) + deleteTables := make([]*Table, 0, 4) + + newTmp := map[string]*Table{} + oldTmp := map[string]*Table{} + + for i := range s.Tables { + newTmp[s.Tables[i].Name] = s.Tables[i] + } + for i := range old.Tables { + oldTmp[old.Tables[i].Name] = old.Tables[i] + } + + for i := range s.Tables { + if _, ok := oldTmp[s.Tables[i].Name]; !ok { + addTables = append(addTables, s.Tables[i]) + } + } + + for i := range old.Tables { + val, ok := newTmp[old.Tables[i].Name] + if !ok { + deleteTables = append(deleteTables, old.Tables[i]) + continue + } + + if !reflect.DeepEqual(val, old.Tables[i]) { + updateTables = append(updateTables, val) + continue + } + } + + return &ShardingRuleEvent{ + AddTables: addTables, + UpdateTables: updateTables, + DeleteTables: deleteTables, + } +} + +func (s *ShadowRule) Diff(old *ShadowRule) *ShadowRuleEvent { + addTables := make([]*ShadowTable, 0, 4) + updateTables := make([]*ShadowTable, 0, 4) + deleteTables := make([]*ShadowTable, 0, 4) + + newTmp := map[string]*ShadowTable{} + oldTmp := map[string]*ShadowTable{} + + for i := range s.ShadowTables { + newTmp[s.ShadowTables[i].Name] = s.ShadowTables[i] + } + for i := range old.ShadowTables { + oldTmp[old.ShadowTables[i].Name] = old.ShadowTables[i] + } + + for i 
:= range s.ShadowTables { + if _, ok := oldTmp[s.ShadowTables[i].Name]; !ok { + addTables = append(addTables, s.ShadowTables[i]) + } + } + + for i := range old.ShadowTables { + val, ok := newTmp[old.ShadowTables[i].Name] + if !ok { + deleteTables = append(deleteTables, old.ShadowTables[i]) + continue + } + + if !reflect.DeepEqual(val, old.ShadowTables[i]) { + updateTables = append(updateTables, val) + continue + } + } + + return &ShadowRuleEvent{ + AddTables: addTables, + UpdateTables: updateTables, + DeleteTables: deleteTables, + } +} diff --git a/pkg/config/diff_test.go b/pkg/config/diff_test.go new file mode 100644 index 00000000..ae1f054d --- /dev/null +++ b/pkg/config/diff_test.go @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package config + +import ( + "testing" +) + +import ( + "github.com/stretchr/testify/assert" +) + +func TestNodes_Diff(t *testing.T) { + type args struct { + old Nodes + } + tests := []struct { + name string + n Nodes + args args + want *NodesEvent + }{ + { + name: "NotChange_Nodes", + n: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + args: struct{ old Nodes }{ + old: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + }, + want: &NodesEvent{ + AddNodes: []*Node{}, + UpdateNodes: []*Node{}, + DeleteNodes: []*Node{}, + }, + }, + { + name: "Change_AddNodes", + n: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + "mock_node_2": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + args: struct{ old Nodes }{ + old: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + }, + want: &NodesEvent{ + AddNodes: []*Node{ + { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + UpdateNodes: []*Node{}, + DeleteNodes: []*Node{}, + }, + }, + { + name: "Change_DeleteNodes", + n: map[string]*Node{}, + args: struct{ old Nodes }{ + old: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + }, + want: &NodesEvent{ + AddNodes: []*Node{}, + UpdateNodes: []*Node{}, + DeleteNodes: []*Node{ + { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, 
+ Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + }, + }, + + { + name: "Change_UpdateNodes", + n: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + Parameters: map[string]string{ + "mock_param_key_1": "mock_param_value_1", + }, + }, + }, + args: struct{ old Nodes }{ + old: map[string]*Node{ + "mock_node_1": { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + }, + }, + }, + want: &NodesEvent{ + AddNodes: []*Node{}, + UpdateNodes: []*Node{ + { + Name: "mock_node_1", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db_1", + Parameters: map[string]string{ + "mock_param_key_1": "mock_param_value_1", + }, + }, + }, + DeleteNodes: []*Node{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, tt.n.Diff(tt.args.old), "Diff(%v)", tt.args.old) + }) + } +} diff --git a/pkg/config/equals.go b/pkg/config/equals.go new file mode 100644 index 00000000..22f81b10 --- /dev/null +++ b/pkg/config/equals.go @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package config + +import ( + "reflect" +) + +func (u *User) Equals(o *User) bool { + + return u.Username == o.Username && u.Password == o.Password +} + +func (n *Node) Equals(o *Node) bool { + if n.Name != o.Name { + return false + } + + if n.Host != o.Host || n.Port != o.Port { + return false + } + + if n.Database != o.Database || n.Username != o.Username || n.Password != o.Password { + return false + } + + if n.Weight != o.Weight { + return false + } + + if len(n.Labels) != len(o.Labels) { + return false + } + + if len(n.Labels) != len(o.Labels) || !reflect.DeepEqual(n.Labels, o.Labels) { + return false + } + + if len(n.Parameters) != len(o.Parameters) || !reflect.DeepEqual(n.Parameters, o.Parameters) { + return false + } + + return true +} + +func (r Rules) Equals(o Rules) bool { + if len(r) == 0 && len(o) == 0 { + return true + } + + if len(r) != len(o) { + return false + } + + newT := make([]*Rule, 0, 4) + updateT := make([]*Rule, 0, 4) + deleteT := make([]*Rule, 0, 4) + + newTmp := map[string]*Rule{} + oldTmp := map[string]*Rule{} + + for i := range r { + newTmp[r[i].Column] = r[i] + } + for i := range o { + oldTmp[o[i].Column] = o[i] + } + + for i := range r { + if _, ok := oldTmp[r[i].Column]; !ok { + newT = append(newT, r[i]) + } + } + + for i := range o { + val, ok := newTmp[o[i].Column] + if !ok { + deleteT = append(deleteT, o[i]) + continue + } + + if !reflect.DeepEqual(val, o[i]) { + updateT = append(updateT, val) + continue + } + } + + return len(newT) == 0 && len(updateT) == 0 && len(deleteT) == 0 +} + +func (t *Table) Equals(o *Table) bool { + if len(t.DbRules) != len(o.DbRules) { + return false + } + + if len(t.TblRules) != len(o.TblRules) { + return false + } + + if !Rules(t.DbRules).Equals(o.DbRules) { + return false + } + if !Rules(t.TblRules).Equals(o.TblRules) { + return false + } + + if !reflect.DeepEqual(t.Topology, o.Topology) || !reflect.DeepEqual(t.ShadowTopology, o.ShadowTopology) { + return false + } + + if t.AllowFullScan != o.AllowFullScan { + return false + } + + if !reflect.DeepEqual(t.Attributes, o.Attributes) { + return false + } + + return true +} diff --git a/pkg/config/etcd/etcd.go b/pkg/config/etcd/etcd.go index 5c8bbace..26fd8810 100644 --- a/pkg/config/etcd/etcd.go +++ b/pkg/config/etcd/etcd.go @@ -26,65 +26,79 @@ import ( ) import ( - etcdv3 "github.com/dubbogo/gost/database/kv/etcd/v3" - "go.etcd.io/etcd/api/v3/mvccpb" clientv3 "go.etcd.io/etcd/client/v3" + + "google.golang.org/grpc" ) import ( "github.com/arana-db/arana/pkg/config" - "github.com/arana-db/arana/pkg/util/env" "github.com/arana-db/arana/pkg/util/log" ) +var ( + PluginName = "etcd" +) + func init() { - config.Register(&storeOperate{}) + config.Register(&storeOperate{ + cancelList: make([]context.CancelFunc, 0, 4), + }) } type storeOperate struct { - client *etcdv3.Client - lock *sync.RWMutex + client *clientv3.Client + lock sync.RWMutex receivers map[config.PathKey]*etcdWatcher cancelList []context.CancelFunc } func (c *storeOperate) Init(options map[string]interface{}) error { endpoints, _ := options["endpoints"].(string) - tmpClient, err := etcdv3.NewConfigClientWithErr( - etcdv3.WithName(etcdv3.RegistryETCDV3Client), - etcdv3.WithTimeout(10*time.Second), - etcdv3.WithEndpoints(strings.Split(endpoints, ",")...), - ) + + ctx, cancel := context.WithCancel(context.Background()) + c.cancelList = append(c.cancelList, cancel) + + rawClient, err := clientv3.New(clientv3.Config{ + Context: ctx, + Endpoints: strings.Split(endpoints, ","), + DialTimeout: 10 * time.Second, +
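+ // NOTE: with grpc.WithBlock in DialOptions below, clientv3.New does not return until a connection is established or DialTimeout (10s here) expires, so an unreachable "endpoints" option surfaces as an Init error instead of failing later on first use.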
DialOptions: []grpc.DialOption{grpc.WithBlock()}, + }) + if err != nil { log.Errorf("failed to initialize etcd client error: %s", err.Error()) return err } - c.client = tmpClient - c.lock = &sync.RWMutex{} + c.client = rawClient c.receivers = make(map[config.PathKey]*etcdWatcher) - c.cancelList = make([]context.CancelFunc, 0, 2) return nil } func (c *storeOperate) Save(key config.PathKey, val []byte) error { - return c.client.Put(string(key), string(val)) + _, err := c.client.Put(context.Background(), string(key), string(val)) + if err != nil { + return err + } + + return nil } func (c *storeOperate) Get(key config.PathKey) ([]byte, error) { - v, err := c.client.Get(string(key)) + resp, err := c.client.Get(context.Background(), string(key)) if err != nil { return nil, err } - if env.IsDevelopEnvironment() { - log.Infof("[ConfigCenter][etcd] load config content : %#v", v) + if len(resp.Kvs) == 0 { + return nil, err } - return []byte(v), nil + return resp.Kvs[0].Value, nil } type etcdWatcher struct { @@ -128,14 +142,12 @@ func (w *etcdWatcher) run(ctx context.Context) { } func (c *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { + c.lock.Lock() defer c.lock.Unlock() if _, ok := c.receivers[key]; !ok { - watchCh, err := c.client.Watch(string(key)) - if err != nil { - return nil, err - } + watchCh := c.client.Watch(context.Background(), string(key)) w := newWatcher(watchCh) c.receivers[key] = w @@ -156,7 +168,7 @@ func (c *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { } func (c *storeOperate) Name() string { - return "etcd" + return PluginName } func (c *storeOperate) Close() error { diff --git a/pkg/config/etcd/etcd_test.go b/pkg/config/etcd/etcd_test.go index 1d9a6299..a42b53a9 100644 --- a/pkg/config/etcd/etcd_test.go +++ b/pkg/config/etcd/etcd_test.go @@ -18,6 +18,7 @@ package etcd import ( + "context" "encoding/json" "net/url" "testing" @@ -30,6 +31,8 @@ import ( "github.com/tidwall/gjson" "go.etcd.io/etcd/server/v3/embed" + + "gopkg.in/yaml.v3" ) import ( @@ -40,24 +43,23 @@ import ( const _defaultEtcdV3WorkDir = "/tmp/arana/config" var ( - mockConfData = map[config.PathKey]string{ - config.DefaultConfigMetadataPath: "", - config.DefaultConfigDataListenersPath: "", - config.DefaultConfigDataSourceClustersPath: "", - config.DefaultConfigDataShardingRulePath: "", - config.DefaultConfigDataTenantsPath: "", - } - - cfg *config.Configuration + mockConfData = map[config.PathKey]string{} + cfg *config.Configuration + mockPath = map[string]*config.PathInfo{} ) func doDataMock() { cfg, _ = config.Load(testdata.Path("fake_config.yaml")) - data, _ := json.Marshal(cfg) + for i := range cfg.Data.Tenants { + tenant := cfg.Data.Tenants[i] + mockPath[tenant.Name] = config.NewPathInfo(tenant.Name) - for k, v := range config.ConfigKeyMapping { - mockConfData[k] = string(gjson.GetBytes(data, v).String()) + data, _ := json.Marshal(tenant) + + for k, v := range mockPath[tenant.Name].ConfigKeyMapping { + mockConfData[k] = gjson.GetBytes(data, v).String() + } } } @@ -95,9 +97,11 @@ func Test_storeOpertae(t *testing.T) { doDataMock() cfg, _ := config.Load(testdata.Path("fake_config.yaml")) - data, _ := json.Marshal(cfg) - for k, v := range config.ConfigKeyMapping { - err := operate.Save(k, []byte(gjson.GetBytes(data, v).String())) + + tenantName := cfg.Data.Tenants[0].Name + + for k, v := range mockConfData { + err := operate.Save(k, []byte(v)) assert.NoError(t, err, "save must success") } @@ -108,26 +112,25 @@ func Test_storeOpertae(t *testing.T) { t.Logf("%s => %s", k, 
string(ret)) } - receiver, err := operate.Watch(config.DefaultConfigDataTenantsPath) + receiver, err := operate.Watch(mockPath[tenantName].DefaultConfigDataUsersPath) assert.NoError(t, err, "watch must success") newCfg, _ := config.Load(testdata.Path("fake_config.yaml")) - data, _ = json.Marshal(newCfg) - - expectVal := string(gjson.GetBytes(data, config.ConfigKeyMapping[config.DefaultConfigDataTenantsPath]).String()) - - for k := range config.ConfigKeyMapping { - if k == config.DefaultConfigDataTenantsPath { - err := operate.client.Put(string(k), expectVal) - assert.NoError(t, err, "put to etcd must success") - break - } + newCfg.Data.Tenants[0].Users = []*config.User{ + { + Username: "arana", + Password: "arana", + }, } + data, _ := yaml.Marshal(newCfg.Data.Tenants[0].Users) + + _, err = operate.client.Put(context.TODO(), string(mockPath[tenantName].DefaultConfigDataUsersPath), string(data)) + assert.NoError(t, err, "put to etcd must success") ret := <-receiver - t.Logf("expect val : %s", expectVal) + t.Logf("expect val : %s", string(data)) t.Logf("acutal val : %s", string(ret)) - assert.Equal(t, expectVal, string(ret)) + assert.Equal(t, string(data), string(ret)) } diff --git a/pkg/config/event.go b/pkg/config/event.go new file mode 100644 index 00000000..2fa5594c --- /dev/null +++ b/pkg/config/event.go @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package config + +type EventType int32 + +const ( + _ EventType = iota + EventTypeTenants + EventTypeUsers + EventTypeNodes + EventTypeClusters + EventTypeShardingRule + EventTypeShadowRule +) + +type ( + Event interface { + Type() EventType + } + + // TenantsEvent tenants event + TenantsEvent struct { + AddTenants Tenants + DeleteTenants Tenants + } + + // UsersEvent users event + UsersEvent struct { + AddUsers Users + UpdateUsers Users + DeleteUsers Users + } + + // ClustersEvent clusters event + ClustersEvent struct { + AddCluster Clusters + DeleteCluster Clusters + UpdateCluster []*ClusterEvent + } + + // ClusterEvent cluster event + ClusterEvent struct { + Name string + Type DataSourceType + SqlMaxLimit int + Parameters ParametersMap + GroupsEvent *GroupsEvent + } + + // GroupsEvent groups event + GroupsEvent struct { + AddGroups Groups + UpdateGroups Groups + DeleteGroups Groups + } + + // GroupEvent group event + GroupEvent struct { + Name string + AddNodes Nodes + UpdateNodes Nodes + DeleteNodes Nodes + } + + // ShardingRuleEvent sharding rule event + ShardingRuleEvent struct { + AddTables []*Table + UpdateTables []*Table + DeleteTables []*Table + } + + // ShadowRuleEvent shadow rule event + ShadowRuleEvent struct { + AddTables []*ShadowTable + UpdateTables []*ShadowTable + DeleteTables []*ShadowTable + } + + // NodesEvent nodes event + NodesEvent struct { + AddNodes []*Node + UpdateNodes []*Node + DeleteNodes []*Node + } +) + +func (e TenantsEvent) Type() EventType { + return EventTypeTenants +} + +func (e NodesEvent) Type() EventType { + return EventTypeNodes +} + +func (e UsersEvent) Type() EventType { + return EventTypeUsers +} + +func (e ClustersEvent) Type() EventType { + return EventTypeClusters +} + +func (e ShardingRuleEvent) Type() EventType { + return EventTypeShardingRule +} + +func (e ShadowRuleEvent) Type() EventType { + return EventTypeShadowRule +} diff --git a/pkg/config/file/file.go b/pkg/config/file/file.go index e3ffdfe9..c644cd75 100644 --- a/pkg/config/file/file.go +++ b/pkg/config/file/file.go @@ -18,11 +18,14 @@ package file import ( + "context" "encoding/json" "os" "path/filepath" "strings" "sync" + "sync/atomic" + "time" ) import ( @@ -40,20 +43,58 @@ import ( "github.com/arana-db/arana/pkg/util/log" ) -var configFilenameList = []string{"config.yaml", "config.yml"} +var ( + PluginName = "file" + configFilenameList = []string{"config.yaml", "config.yml"} +) func init() { config.Register(&storeOperate{}) } -type storeOperate struct { +type receiverBucket struct { lock sync.RWMutex - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers map[config.PathKey][]chan<- []byte +} + +func (b *receiverBucket) add(key config.PathKey, rec chan<- []byte) { + b.lock.Lock() + defer b.lock.Unlock() + + if _, ok := b.receivers[key]; !ok { + b.receivers[key] = make([]chan<- []byte, 0, 2) + } + b.receivers[key] = append(b.receivers[key], rec) +} + +func (b *receiverBucket) notifyWatcher(k config.PathKey, val []byte) { + b.lock.RLock() + defer b.lock.RUnlock() + + for i := range b.receivers[k] { + b.receivers[k][i] <- val + } +} + +type storeOperate struct { + initialize int32 + lock sync.RWMutex + contents map[config.PathKey]string + receivers *receiverBucket + cancels []context.CancelFunc + mapping map[string]*config.PathInfo } func (s *storeOperate) Init(options map[string]interface{}) error { - s.receivers = make(map[config.PathKey][]chan []byte) + if !atomic.CompareAndSwapInt32(&s.initialize, 0, 1) { + return nil + } + + 
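+ // The cancel func collected here is invoked by Close, which is what stops the watchFileChange goroutine that Init starts once a config file path has been resolved.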
ctx, cancel := context.WithCancel(context.Background()) + s.cancels = append(s.cancels, cancel) + + s.mapping = make(map[string]*config.PathInfo) + s.receivers = &receiverBucket{receivers: map[config.PathKey][]chan<- []byte{}} var ( content string ok bool @@ -79,80 +120,102 @@ func (s *storeOperate) Init(options map[string]interface{}) error { return errors.New("no config file found") } + path, err := formatPath(path) + if err != nil { + return err + } if err := s.readFromFile(path, &cfg); err != nil { return err } + + go s.watchFileChange(ctx, path) } - configJson, err := json.Marshal(cfg) - if err != nil { - return errors.Wrap(err, "config json.marshal failed") + for i := range cfg.Data.Tenants { + name := cfg.Data.Tenants[i].Name + s.mapping[name] = config.NewPathInfo(name) } - s.initCfgJsonMap(string(configJson)) + + s.updateContents(cfg, false) return nil } -func (s *storeOperate) initCfgJsonMap(val string) { - s.cfgJson = make(map[config.PathKey]string) +func (s *storeOperate) updateContents(cfg config.Configuration, notify bool) { + s.lock.Lock() + defer s.lock.Unlock() + + s.contents = make(map[config.PathKey]string) + + tenants := make([]string, 0, 4) + for i := range cfg.Data.Tenants { + tenants = append(tenants, cfg.Data.Tenants[i].Name) - for k, v := range config.ConfigKeyMapping { - s.cfgJson[k] = gjson.Get(val, v).String() + tmp, _ := json.Marshal(cfg.Data.Tenants[i]) + ret := string(tmp) + mapping := s.mapping[cfg.Data.Tenants[i].Name] + + for k, v := range mapping.ConfigKeyMapping { + val, _ := config.JSONToYAML(gjson.Get(ret, v).String()) + s.contents[k] = string(val) + if notify { + s.receivers.notifyWatcher(k, val) + } + } } + ret, _ := yaml.Marshal(tenants) + s.contents[config.DefaultTenantsPath] = string(ret) + if env.IsDevelopEnvironment() { - log.Infof("[ConfigCenter][File] load config content : %#v", s.cfgJson) + log.Debugf("[ConfigCenter][File] load config content : %#v", s.contents) } } func (s *storeOperate) Save(key config.PathKey, val []byte) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.contents[key] = string(val) + s.receivers.notifyWatcher(key, val) return nil } +//Get func (s *storeOperate) Get(key config.PathKey) ([]byte, error) { - val := []byte(s.cfgJson[key]) + s.lock.RLock() + defer s.lock.RUnlock() + + val := []byte(s.contents[key]) return val, nil } -// Watch TODO change notification through file inotify mechanism +// Watch func (s *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { - s.lock.Lock() - defer s.lock.Unlock() - - if _, ok := s.receivers[key]; !ok { - s.receivers[key] = make([]chan []byte, 0, 2) - } - rec := make(chan []byte) - - s.receivers[key] = append(s.receivers[key], rec) - + s.receivers.add(key, rec) return rec, nil } func (s *storeOperate) Name() string { - return "file" + return PluginName } func (s *storeOperate) Close() error { + + for i := range s.cancels { + s.cancels[i]() + } + return nil } +//readFromFile func (s *storeOperate) readFromFile(path string, cfg *config.Configuration) error { var ( f *os.File err error ) - if strings.HasPrefix(path, "~") { - var home string - if home, err = os.UserHomeDir(); err != nil { - return err - } - path = strings.Replace(path, "~", home, 1) - } - - path = filepath.Clean(path) - if f, err = os.Open(path); err != nil { return errors.Wrapf(err, "failed to open arana config file '%s'", path) } @@ -179,3 +242,55 @@ func (s *storeOperate) searchDefaultConfigFile() (string, bool) { } return "", false } + +func formatPath(path string) (string, error) { + if 
strings.HasPrefix(path, "~") { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + path = strings.Replace(path, "~", home, 1) + } + + path = filepath.Clean(path) + + return path, nil +} + +//watchFileChange +func (s *storeOperate) watchFileChange(ctx context.Context, path string) { + + refreshT := time.NewTicker(30 * time.Second) + + oldStat, err := os.Stat(path) + if err != nil { + log.Errorf("[ConfigCenter][File] get file=%s stat fail : %s", path, err.Error()) + } + + for { + select { + case <-refreshT.C: + stat, err := os.Stat(path) + if err != nil { + log.Errorf("[ConfigCenter][File] get file=%s stat fail : %s", path, err.Error()) + continue + } + + if stat.ModTime().Equal(oldStat.ModTime()) { + continue + } + + cfg := &config.Configuration{} + if err := s.readFromFile(path, cfg); err != nil { + log.Errorf("[ConfigCenter][File] read file=%s and marshal to Configuration fail : %s", path, err.Error()) + return + } + + log.Errorf("[ConfigCenter][File] watch file=%s change : %+v", path, stat.ModTime()) + s.updateContents(*cfg, true) + case <-ctx.Done(): + + } + } + +} diff --git a/pkg/config/file/file_test.go b/pkg/config/file/file_test.go index 16a69c4d..e71a4f9d 100644 --- a/pkg/config/file/file_test.go +++ b/pkg/config/file/file_test.go @@ -19,9 +19,14 @@ package file import ( "reflect" + "sync" "testing" ) +import ( + "gopkg.in/yaml.v3" +) + import ( "github.com/arana-db/arana/pkg/config" "github.com/arana-db/arana/testdata" @@ -30,263 +35,115 @@ import ( var ( FakeConfigPath = testdata.Path("fake_config.yaml") EmptyConfigPath = testdata.Path("fake_empty_config.yaml") -) -var jsonConfig = `{ - "kind":"ConfigMap", - "apiVersion":"1.0", - "metadata":{ - "name":"arana-config" - }, - "data":{ - "listeners":[ - { - "protocol_type":"mysql", - "socket_address":{ - "address":"0.0.0.0", - "port":13306 - }, - "server_version":"5.7.0" - } - ], - "tenants":[ - { - "name":"arana", - "users":[ - { - "username":"arana", - "password":"123456" - }, - { - "username":"dksl", - "password":"123456" - } - ] - } - ], - "clusters":[ - { - "name":"employees", - "type":"mysql", - "sql_max_limit":-1, - "tenant":"arana", - "parameters":{ - "max_allowed_packet":"256M" - }, - "groups":[ - { - "name":"employees_0000", - "nodes":[ - { - "name":"node0", - "host":"arana-mysql", - "port":3306, - "username":"root", - "password":"123456", - "database":"employees_0000", - "parameters":null, - "weight":"r10w10" - }, - { - "name":"node0_r_0", - "host":"arana-mysql", - "port":3306, - "username":"root", - "password":"123456", - "database":"employees_0000_r", - "parameters":null, - "weight":"r0w0" - } - ] - }, - { - "name":"employees_0001", - "nodes":[ - { - "name":"node1", - "host":"arana-mysql", - "port":3306, - "username":"root", - "password":"123456", - "database":"employees_0001", - "parameters":null, - "weight":"r10w10" - } - ] - }, - { - "name":"employees_0002", - "nodes":[ - { - "name":"node2", - "host":"arana-mysql", - "port":3306, - "username":"root", - "password":"123456", - "database":"employees_0002", - "parameters":null, - "weight":"r10w10" - } - ] - }, - { - "name":"employees_0003", - "nodes":[ - { - "name":"node3", - "host":"arana-mysql", - "port":3306, - "username":"root", - "password":"123456", - "database":"employees_0003", - "parameters":null, - "weight":"r10w10" - } - ] - } - ] - } - ], - "sharding_rule":{ - "tables":[ - { - "name":"employees.student", - "sequence":{ - "type":"snowflake", - "option":null - }, - "allow_full_scan":true, - "db_rules":[ - { - "column":"uid", - 
"type":"scriptExpr", - "expr":"parseInt($value % 32 / 8)", - "step":0 - } - ], - "tbl_rules":[ - { - "column":"uid", - "type":"scriptExpr", - "expr":"$value % 32", - "step":32 - } - ], - "topology":{ - "db_pattern":"employees_${0000..0003}", - "tbl_pattern":"student_${0000..0031}" - }, - "shadow_topology":null, - "attributes":{ - "sqlMaxLimit":"-1" - } - } - ] - } - } -}` - -var yamlConfig = ` + yamlConfig = ` kind: ConfigMap apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: + - username: root + password: "123456" - username: arana password: "123456" - - username: dksl + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + parameters: + max_allowed_packet: 256M + groups: + - name: employees_0000 + nodes: + - node0 + - node0_r_0 + - name: employees_0001 + nodes: + - node1 + - name: employees_0002 + nodes: + - node2 + - name: employees_0003 + nodes: + - node3 + sharding_rule: + tables: + - name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: + - column: uid + type: scriptExpr + expr: parseInt($value % 32 / 8) + tbl_rules: + - column: uid + type: scriptExpr + expr: $value % 32 + step: 32 + topology: + db_pattern: employees_${0000..0003} + tbl_pattern: student_${0000..0031} + attributes: + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root password: "123456" - - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - parameters: - max_allowed_packet: 256M - groups: - - name: employees_0000 - nodes: - - name: node0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - parameters: - - name: node0_r_0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000_r - weight: r0w0 - parameters: - - name: employees_0001 - nodes: - - name: node1 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - parameters: - - name: employees_0002 - nodes: - - name: node2 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0002 - weight: r10w10 - parameters: - - name: employees_0003 - nodes: - - name: node3 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0003 - weight: r10w10 - parameters: - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - db_rules: - - column: uid - type: scriptExpr - expr: parseInt($value % 32 / 8) - tbl_rules: - - column: uid - type: scriptExpr - expr: $value % 32 - step: 32 - topology: - db_pattern: employees_${0000..0003} - tbl_pattern: student_${0000..0031} - attributes: - sqlMaxLimit: -1 + database: employees_0000 + weight: r10w10 + parameters: + node0_r_0: + name: node0_r_0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000_r + weight: r0w0 + parameters: + node1: + name: node1 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0001 + weight: r10w10 + parameters: + node2: + name: node2 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0002 + weight: r10w10 + parameters: + node3: + name: node3 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0003 + 
weight: r10w10 + parameters: ` +) func Test_storeOperate_Close(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } tests := []struct { name string @@ -299,7 +156,7 @@ func Test_storeOperate_Close(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } if err := s.Close(); (err != nil) != tt.wantErr { t.Errorf("Close() error = %v, wantErr %v", err, tt.wantErr) @@ -310,8 +167,8 @@ func Test_storeOperate_Close(t *testing.T) { func Test_storeOperate_Get(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { key config.PathKey @@ -335,7 +192,7 @@ func Test_storeOperate_Get(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } got, err := s.Get(tt.args.key) if (err != nil) != tt.wantErr { @@ -351,8 +208,8 @@ func Test_storeOperate_Get(t *testing.T) { func Test_storeOperate_Init(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { options map[string]interface{} @@ -394,7 +251,7 @@ func Test_storeOperate_Init(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } if err := s.Init(tt.args.options); (err != nil) != tt.wantErr { t.Errorf("Init() error = %v, wantErr %v", err, tt.wantErr) @@ -405,8 +262,8 @@ func Test_storeOperate_Init(t *testing.T) { func Test_storeOperate_Name(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } tests := []struct { name string @@ -419,7 +276,7 @@ func Test_storeOperate_Name(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } if got := s.Name(); got != tt.want { t.Errorf("Name() = %v, want %v", got, tt.want) @@ -430,8 +287,8 @@ func Test_storeOperate_Name(t *testing.T) { func Test_storeOperate_Save(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { key config.PathKey @@ -443,13 +300,19 @@ func Test_storeOperate_Save(t *testing.T) { args args wantErr bool }{ - {"Save", fields{}, args{}, false}, + {"Save", fields{ + receivers: &receiverBucket{ + lock: sync.RWMutex{}, + receivers: map[config.PathKey][]chan<- []byte{}, + }, + contents: map[config.PathKey]string{}, + }, args{}, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } if err := s.Save(tt.args.key, tt.args.val); (err != nil) != tt.wantErr { t.Errorf("Save() error = %v, wantErr %v", err, tt.wantErr) @@ -460,8 +323,8 @@ func Test_storeOperate_Save(t *testing.T) { func Test_storeOperate_Watch(t *testing.T) { type fields struct { - 
receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { key config.PathKey @@ -474,7 +337,7 @@ func Test_storeOperate_Watch(t *testing.T) { }{ { "Watch", - fields{make(map[config.PathKey][]chan []byte), make(map[config.PathKey]string)}, + fields{&receiverBucket{receivers: map[config.PathKey][]chan<- []byte{}}, make(map[config.PathKey]string)}, args{"/arana-db/config/data/dataSourceClusters"}, false, }, @@ -483,7 +346,7 @@ func Test_storeOperate_Watch(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } got, err := s.Watch(tt.args.key) if (err != nil) != tt.wantErr { @@ -497,10 +360,10 @@ func Test_storeOperate_Watch(t *testing.T) { } } -func Test_storeOperate_initCfgJsonMap(t *testing.T) { +func Test_storeOperate_initContentsMap(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { val string @@ -510,23 +373,37 @@ func Test_storeOperate_initCfgJsonMap(t *testing.T) { fields fields args args }{ - {"initCfgJsonMap", fields{}, args{jsonConfig}}, + {"initContentsMap", fields{ + receivers: &receiverBucket{ + lock: sync.RWMutex{}, + receivers: map[config.PathKey][]chan<- []byte{}, + }, + contents: map[config.PathKey]string{}, + }, args{yamlConfig}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } - s.initCfgJsonMap(tt.args.val) + + s.Init(map[string]interface{}{ + "content": yamlConfig, + }) + + cfg := new(config.Configuration) + _ = yaml.Unmarshal([]byte(tt.args.val), cfg) + + s.updateContents(*cfg, false) }) } } func Test_storeOperate_readFromFile(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } type args struct { path string @@ -554,7 +431,7 @@ func Test_storeOperate_readFromFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } if err := s.readFromFile(tt.args.path, tt.args.cfg); (err != nil) != tt.wantErr { t.Errorf("readFromFile() error = %v, wantErr %v", err, tt.wantErr) @@ -565,8 +442,8 @@ func Test_storeOperate_readFromFile(t *testing.T) { func Test_storeOperate_searchDefaultConfigFile(t *testing.T) { type fields struct { - receivers map[config.PathKey][]chan []byte - cfgJson map[config.PathKey]string + receivers *receiverBucket + contents map[config.PathKey]string } tests := []struct { name string @@ -580,7 +457,7 @@ func Test_storeOperate_searchDefaultConfigFile(t *testing.T) { t.Run(tt.name, func(t *testing.T) { s := &storeOperate{ receivers: tt.fields.receivers, - cfgJson: tt.fields.cfgJson, + contents: tt.fields.contents, } got, got1 := s.searchDefaultConfigFile() if got != tt.want { diff --git a/pkg/config/model.go b/pkg/config/model.go index dcb0cf41..e832af42 100644 --- a/pkg/config/model.go +++ b/pkg/config/model.go @@ -42,17 +42,16 @@ import ( ) type ( - // Configuration represents an Arana configuration. 
- Configuration struct { + DataRevision interface { + Revision() string + } + + Spec struct { Kind string `yaml:"kind" json:"kind,omitempty"` APIVersion string `yaml:"apiVersion" json:"apiVersion,omitempty"` Metadata map[string]interface{} `yaml:"metadata" json:"metadata"` - Data *Data `validate:"required,structonly" yaml:"data" json:"data"` } - // DataSourceType is the data source type - DataSourceType string - // SocketAddress specify either a logical or physical address and port, which are // used to tell server where to bind/listen, connect to upstream and find // management servers @@ -61,31 +60,46 @@ type ( Port int `default:"13306" yaml:"port" json:"port"` } + Listener struct { + ProtocolType string `yaml:"protocol_type" json:"protocol_type"` + SocketAddress *SocketAddress `yaml:"socket_address" json:"socket_address"` + ServerVersion string `yaml:"server_version" json:"server_version"` + } + + // Configuration represents an Arana configuration. + Configuration struct { + Spec `yaml:",inline"` + Data *Data `validate:"required,structonly" yaml:"data" json:"data"` + } + + // DataSourceType is the data source type + DataSourceType string + Data struct { - Listeners []*Listener `validate:"required,dive" yaml:"listeners" json:"listeners"` - Tenants []*Tenant `validate:"required,dive" yaml:"tenants" json:"tenants"` - DataSourceClusters []*DataSourceCluster `validate:"required,dive" yaml:"clusters" json:"clusters"` - ShardingRule *ShardingRule `validate:"required,dive" yaml:"sharding_rule,omitempty" json:"sharding_rule,omitempty"` - ShadowRule *ShadowRule `yaml:"shadow_rule,omitempty" json:"shadow_rule,omitempty"` + Tenants []*Tenant `validate:"required,dive" yaml:"tenants" json:"tenants"` } Tenant struct { - Name string `validate:"required" yaml:"name" json:"name"` - Users []*User `validate:"required" yaml:"users" json:"users"` + Spec + Name string `validate:"required" yaml:"name" json:"name"` + Users []*User `validate:"required" yaml:"users" json:"users"` + DataSourceClusters []*DataSourceCluster `validate:"required,dive" yaml:"clusters" json:"clusters"` + ShardingRule *ShardingRule `validate:"required,dive" yaml:"sharding_rule,omitempty" json:"sharding_rule,omitempty"` + ShadowRule *ShadowRule `yaml:"shadow_rule,omitempty" json:"shadow_rule,omitempty"` + Nodes map[string]*Node `validate:"required" yaml:"nodes" json:"nodes"` } DataSourceCluster struct { Name string `yaml:"name" json:"name"` Type DataSourceType `yaml:"type" json:"type"` SqlMaxLimit int `default:"-1" yaml:"sql_max_limit" json:"sql_max_limit,omitempty"` - Tenant string `yaml:"tenant" json:"tenant"` Parameters ParametersMap `yaml:"parameters" json:"parameters"` Groups []*Group `yaml:"groups" json:"groups"` } Group struct { - Name string `yaml:"name" json:"name"` - Nodes []*Node `yaml:"nodes" json:"nodes"` + Name string `yaml:"name" json:"name"` + Nodes []string `yaml:"nodes" json:"nodes"` } Node struct { @@ -135,12 +149,6 @@ type ( Regex string `yaml:"regex" json:"regex"` } - Listener struct { - ProtocolType string `yaml:"protocol_type" json:"protocol_type"` - SocketAddress *SocketAddress `yaml:"socket_address" json:"socket_address"` - ServerVersion string `yaml:"server_version" json:"server_version"` - } - User struct { Username string `yaml:"username" json:"username"` Password string `yaml:"password" json:"password"` @@ -246,6 +254,12 @@ func Load(path string) (*Configuration, error) { return &cfg, nil } +func (t *Tenant) Empty() bool { + return len(t.Users) == 0 && + len(t.Nodes) == 0 && + len(t.DataSourceClusters) == 0 +} 
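+ // Usage sketch (illustrative only) for Empty above and NewEmptyTenant below: t := NewEmptyTenant() starts with no Users, Nodes or DataSourceClusters, so t.Empty() reports true; the config center's compositeConfiguration relies on this to return a nil *Tenant until at least one section has been loaded from the store.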
+ var _weightRegexp = regexp.MustCompile(`^[rR]([0-9]+)[wW]([0-9]+)$`) func (d *Node) GetReadAndWriteWeight() (int, int, error) { @@ -357,3 +371,25 @@ func GetConnPropIdleTime(connProps map[string]interface{}, defaultValue time.Dur return time.Duration(n) * time.Second } + +type ( + Clusters []*DataSourceCluster + Tenants []string + Nodes map[string]*Node + Groups []*Group + Users []*User + Rules []*Rule +) + +func NewEmptyTenant() *Tenant { + return &Tenant{ + Spec: Spec{ + Metadata: map[string]interface{}{}, + }, + Users: make([]*User, 0, 1), + DataSourceClusters: make([]*DataSourceCluster, 0, 1), + ShardingRule: new(ShardingRule), + ShadowRule: new(ShadowRule), + Nodes: map[string]*Node{}, + } +} diff --git a/pkg/config/model_test.go b/pkg/config/model_test.go index 74b74cc7..dac6fa23 100644 --- a/pkg/config/model_test.go +++ b/pkg/config/model_test.go @@ -59,18 +59,18 @@ func TestDataSourceClustersConf(t *testing.T) { assert.NoError(t, err) assert.NotEqual(t, nil, conf) - assert.Equal(t, 1, len(conf.Data.DataSourceClusters)) - dataSourceCluster := conf.Data.DataSourceClusters[0] + assert.Equal(t, 1, len(conf.Data.Tenants[0].DataSourceClusters)) + dataSourceCluster := conf.Data.Tenants[0].DataSourceClusters[0] assert.Equal(t, "employees", dataSourceCluster.Name) assert.Equal(t, config.DBMySQL, dataSourceCluster.Type) assert.Equal(t, -1, dataSourceCluster.SqlMaxLimit) - assert.Equal(t, "arana", dataSourceCluster.Tenant) + assert.Equal(t, "arana", conf.Data.Tenants[0].Name) assert.Equal(t, 4, len(dataSourceCluster.Groups)) group := dataSourceCluster.Groups[0] assert.Equal(t, "employees_0000", group.Name) assert.Equal(t, 2, len(group.Nodes)) - node := group.Nodes[0] + node := conf.Data.Tenants[0].Nodes["node0"] assert.Equal(t, "arana-mysql", node.Host) assert.Equal(t, 3306, node.Port) assert.Equal(t, "root", node.Username) @@ -86,9 +86,9 @@ func TestShardingRuleConf(t *testing.T) { assert.NoError(t, err) assert.NotEqual(t, nil, conf) - assert.NotNil(t, conf.Data.ShardingRule) - assert.Equal(t, 1, len(conf.Data.ShardingRule.Tables)) - table := conf.Data.ShardingRule.Tables[0] + assert.NotNil(t, conf.Data.Tenants[0].ShardingRule) + assert.Equal(t, 1, len(conf.Data.Tenants[0].ShardingRule.Tables)) + table := conf.Data.Tenants[0].ShardingRule.Tables[0] assert.Equal(t, "employees.student", table.Name) assert.Equal(t, true, table.AllowFullScan) @@ -104,7 +104,7 @@ func TestShardingRuleConf(t *testing.T) { assert.Equal(t, "student_${0000..0031}", table.Topology.TblPattern) // assert.Equal(t, "employee_0000", table.ShadowTopology.DbPattern) // assert.Equal(t, "__test_student_${0000...0007}", table.ShadowTopology.TblPattern) - assert.Len(t, table.Attributes, 2) + assert.Len(t, table.Attributes, 1) } func TestUnmarshalTextForProtocolTypeNil(t *testing.T) { diff --git a/pkg/config/nacos/nacos.go b/pkg/config/nacos/nacos.go index b9a35400..ad2a9edf 100644 --- a/pkg/config/nacos/nacos.go +++ b/pkg/config/nacos/nacos.go @@ -30,12 +30,12 @@ import ( "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" "github.com/nacos-group/nacos-sdk-go/v2/common/constant" "github.com/nacos-group/nacos-sdk-go/v2/vo" + + "github.com/pkg/errors" ) import ( "github.com/arana-db/arana/pkg/config" - "github.com/arana-db/arana/pkg/util/env" - "github.com/arana-db/arana/pkg/util/log" ) const ( @@ -48,6 +48,14 @@ const ( _server string = "endpoints" _contextPath string = "contextPath" _scheme string = "scheme" + + _pathSplit string = "::" +) + +var ( + PluginName = "nacos" + + ErrorPublishConfigFail = errors.New("save 
config into nacos fail") ) func init() { @@ -58,27 +66,19 @@ func init() { type storeOperate struct { groupName string client config_client.IConfigClient - confMap map[config.PathKey]string - cfgLock *sync.RWMutex - lock *sync.RWMutex + cfgLock sync.RWMutex + lock sync.RWMutex receivers map[config.PathKey]*nacosWatcher cancelList []context.CancelFunc } // Init plugin initialization func (s *storeOperate) Init(options map[string]interface{}) error { - s.lock = &sync.RWMutex{} - s.cfgLock = &sync.RWMutex{} - s.confMap = make(map[config.PathKey]string) s.receivers = make(map[config.PathKey]*nacosWatcher) if err := s.initNacosClient(options); err != nil { return err } - if err := s.loadDataFromServer(); err != nil { - return err - } - return nil } @@ -152,53 +152,42 @@ func parseClientConfig(options map[string]interface{}) constant.ClientConfig { return cc } -func (s *storeOperate) loadDataFromServer() error { - s.cfgLock.Lock() - defer s.cfgLock.Unlock() - - for dataId := range config.ConfigKeyMapping { - data, err := s.client.GetConfig(vo.ConfigParam{ - DataId: string(dataId), - Group: s.groupName, - }) - if err != nil { - return err - } - - s.confMap[dataId] = data - } - - return nil -} - // Save save a configuration data func (s *storeOperate) Save(key config.PathKey, val []byte) error { - _, err := s.client.PublishConfig(vo.ConfigParam{ + ok, err := s.client.PublishConfig(vo.ConfigParam{ Group: s.groupName, - DataId: string(key), + DataId: buildNacosDataId(string(key)), Content: string(val), }) + if err != nil { + return err + } - return err + if !ok { + return ErrorPublishConfigFail + } + return nil } // Get get a configuration func (s *storeOperate) Get(key config.PathKey) ([]byte, error) { - defer s.cfgLock.RUnlock() - s.cfgLock.RLock() - - val := []byte(s.confMap[key]) + ret, err := s.client.GetConfig(vo.ConfigParam{ + DataId: buildNacosDataId(string(key)), + Group: s.groupName, + }) - if env.IsDevelopEnvironment() { - log.Infof("[ConfigCenter][nacos] load config content : %#v", string(val)) + if err != nil { + return nil, err } - return val, nil + + return []byte(ret), nil } // Watch Monitor changes of the key func (s *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { - defer s.lock.Unlock() s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.receivers[key]; !ok { w, err := s.newWatcher(key, s.client) if err != nil { @@ -213,8 +202,8 @@ func (s *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { w := s.receivers[key] - defer w.lock.Unlock() w.lock.Lock() + defer w.lock.Unlock() rec := make(chan []byte) s.receivers[key].receivers = append(s.receivers[key].receivers, rec) @@ -223,11 +212,12 @@ func (s *storeOperate) Watch(key config.PathKey) (<-chan []byte, error) { // Name plugin name func (s *storeOperate) Name() string { - return "nacos" + return PluginName } -// Close do close storeOperate +// Close closes storeOperate func (s *storeOperate) Close() error { + s.client.CloseClient() return nil } @@ -244,13 +234,13 @@ func (s *storeOperate) newWatcher(key config.PathKey, client config_client.IConf } err := client.ListenConfig(vo.ConfigParam{ - DataId: string(key), + DataId: buildNacosDataId(string(key)), Group: s.groupName, OnChange: func(_, _, dataId, content string) { s.cfgLock.Lock() defer s.cfgLock.Unlock() - s.confMap[config.PathKey(dataId)] = content + dataId = revertNacosDataId(dataId) s.receivers[config.PathKey(dataId)].ch <- []byte(content) }, }) @@ -276,3 +266,11 @@ func (w *nacosWatcher) run(ctx context.Context) { } } } + +func 
buildNacosDataId(v string) string { + return strings.ReplaceAll(v, "/", _pathSplit) +} + +func revertNacosDataId(v string) string { + return strings.ReplaceAll(v, _pathSplit, "/") +} diff --git a/pkg/config/nacos/nacos_test.go b/pkg/config/nacos/nacos_test.go index bccb523c..a77f22e6 100644 --- a/pkg/config/nacos/nacos_test.go +++ b/pkg/config/nacos/nacos_test.go @@ -33,6 +33,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/tidwall/gjson" + + "gopkg.in/yaml.v3" ) import ( @@ -41,25 +43,23 @@ import ( ) var ( - mockConfData = map[config.PathKey]string{ - config.DefaultConfigPath: "", - config.DefaultConfigMetadataPath: "", - config.DefaultConfigDataListenersPath: "", - config.DefaultConfigDataSourceClustersPath: "", - config.DefaultConfigDataShardingRulePath: "", - config.DefaultConfigDataTenantsPath: "", - } - - cfg *config.Configuration + mockConfData = map[config.PathKey]string{} + cfg *config.Configuration + mockPath = map[string]*config.PathInfo{} ) func doDataMock() { cfg, _ = config.Load(testdata.Path("fake_config.yaml")) - data, _ := json.Marshal(cfg) + for i := range cfg.Data.Tenants { + tenant := cfg.Data.Tenants[i] + mockPath[tenant.Name] = config.NewPathInfo(tenant.Name) - for k, v := range config.ConfigKeyMapping { - mockConfData[k] = string(gjson.GetBytes(data, v).String()) + data, _ := json.Marshal(tenant) + + for k, v := range mockPath[tenant.Name].ConfigKeyMapping { + mockConfData[k] = gjson.GetBytes(data, v).String() + } } } @@ -177,9 +177,6 @@ func buildOperate() *storeOperate { operate := &storeOperate{ groupName: "arana", client: newNacosClient(), - confMap: make(map[config.PathKey]string), - cfgLock: &sync.RWMutex{}, - lock: &sync.RWMutex{}, receivers: make(map[config.PathKey]*nacosWatcher), cancelList: []context.CancelFunc{}, } @@ -188,52 +185,42 @@ func buildOperate() *storeOperate { return operate } -func Test_loadDataFromServer(t *testing.T) { - operate := buildOperate() - defer operate.client.CloseClient() - - err := operate.loadDataFromServer() - assert.NoError(t, err, "") - - for k, v := range operate.confMap { - assert.Equalf(t, mockConfData[k], v, "%s should be equal", k) - } -} - func Test_watch(t *testing.T) { operate := buildOperate() defer operate.client.CloseClient() - err := operate.loadDataFromServer() - assert.NoError(t, err, "should be success") - - assert.NoError(t, err, "should be success") - newCfg, _ := config.Load(testdata.Path("fake_config.yaml")) - receiver, err := operate.Watch(config.DefaultConfigDataTenantsPath) + newCfg.Data.Tenants[0].Nodes = map[string]*config.Node{ + "node0": { + Name: "node0", + Host: "127.0.0.1", + Port: 3306, + Username: "arana", + Password: "arana", + Database: "mock_db", + }, + } + + receiver, err := operate.Watch(mockPath[newCfg.Data.Tenants[0].Name].DefaultConfigDataNodesPath) assert.NoError(t, err, "should be success") - data, err := json.Marshal(newCfg) + data, err := yaml.Marshal(newCfg.Data.Tenants[0].Nodes) assert.NoError(t, err, "should be marshal success") - for k, v := range config.ConfigKeyMapping { - if k == config.DefaultConfigDataTenantsPath { - operate.client.PublishConfig(vo.ConfigParam{ - DataId: string(config.DefaultConfigDataTenantsPath), - Content: string(gjson.GetBytes(data, v).String()), - }) - } - } + ok, err := operate.client.PublishConfig(vo.ConfigParam{ + DataId: buildNacosDataId(string(mockPath[newCfg.Data.Tenants[0].Name].DefaultConfigDataNodesPath)), + Content: string(data), + }) + assert.True(t, ok) + assert.NoError(t, err) t.Logf("new config val : %s", string(data)) ret 
:= <-receiver - expectVal := string(gjson.GetBytes(data, config.ConfigKeyMapping[config.DefaultConfigDataTenantsPath]).String()) - - t.Logf("expect val : %s", expectVal) + t.Logf("expect val : %s", string(data)) t.Logf("acutal val : %s", string(ret)) - assert.Equal(t, expectVal, string(ret)) + assert.Equal(t, string(data), string(ret)) } diff --git a/pkg/util/match/slice_match.go b/pkg/util/match/slice_match.go new file mode 100644 index 00000000..062e6d57 --- /dev/null +++ b/pkg/util/match/slice_match.go @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package match + +import ( + "bytes" + "reflect" +) + +// Copy from +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +func ElementsMatch(listA, listB interface{}) (ok bool) { + if isEmpty(listA) && isEmpty(listB) { + return true + } + + if !isList(listA) || !isList(listB) { + return false + } + + extraA, extraB := diffLists(listA, listB) + + if len(extraA) == 0 && len(extraB) == 0 { + return true + } + + return false +} + +// isList checks that the provided value is array or slice. +func isList(list interface{}, msgAndArgs ...interface{}) (ok bool) { + kind := reflect.TypeOf(list).Kind() + if kind != reflect.Array && kind != reflect.Slice { + return false + } + return true +} + +// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. +// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and +// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. 
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + extraA = append(extraA, element) + } + } + + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + extraB = append(extraB, bValue.Index(j).Interface()) + } + + return +} + +func ObjectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + + exp, ok := expected.([]byte) + if !ok { + return reflect.DeepEqual(expected, actual) + } + + act, ok := actual.([]byte) + if !ok { + return false + } + if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) +} diff --git a/test/suite.go b/test/suite.go index 51e3f762..64c6501f 100644 --- a/test/suite.go +++ b/test/suite.go @@ -130,7 +130,7 @@ type MySuite struct { db *sql.DB dbSync sync.Once - tmpFile, bootstrapConfig, configPath, scriptPath string + tmpBootFile, tmpFile, bootstrapConfig, configPath, scriptPath string cases *Cases actualDataset *Message @@ -193,7 +193,8 @@ func (ms *MySuite) DB() *sql.DB { } func (ms *MySuite) SetupSuite() { - if ms.devMode { + devMode := os.Getenv("ARANA_DEBUG_MODE") + if ms.devMode || devMode == "true" || devMode == "on" || devMode == "1" { return } @@ -212,6 +213,8 @@ func (ms *MySuite) SetupSuite() { require.NoError(ms.T(), err) mysqlAddr := fmt.Sprintf("%s:%d", ms.container.Host, ms.container.Port) + // random port + ms.port = 13306 + int(rand2.Int31n(10000)) ms.T().Logf("====== mysql is listening on %s ======\n", mysqlAddr) ms.T().Logf("====== arana will listen on 127.0.0.1:%d ======\n", ms.port) @@ -220,8 +223,9 @@ func (ms *MySuite) SetupSuite() { ms.configPath = "../conf/config.yaml" } cfgPath := testdata.Path(ms.configPath) + bootPath := testdata.Path("../conf/bootstrap.yaml") - err = ms.createConfigFile(cfgPath, ms.container.Host, ms.container.Port) + err = ms.createConfigFile(cfgPath, bootPath, ms.container.Host, ms.container.Port) require.NoError(ms.T(), err) if ms.bootstrapConfig == "" { @@ -229,7 +233,7 @@ func (ms *MySuite) SetupSuite() { } go func() { _ = os.Setenv(constants.EnvConfigPath, ms.tmpFile) - start.Run(testdata.Path("../conf/bootstrap.yaml"), "") + start.Run(ms.tmpBootFile, "") }() // waiting for arana server started @@ -251,10 +255,12 @@ func (ms *MySuite) TearDownSuite() { if ms.db != nil { _ = ms.db.Close() } - _ = ms.container.Terminate(context.Background()) + if ms.container != nil { + _ = ms.container.Terminate(context.Background()) + } } -func (ms *MySuite) createConfigFile(cfgPath, host string, port int) error { +func (ms *MySuite) createConfigFile(cfgPath, bootCfgPath, host string, port int) error { b, err := ioutil.ReadFile(cfgPath) if err != nil { return err @@ -285,6 +291,34 @@ func (ms *MySuite) createConfigFile(cfgPath, host string, port int) error { return err } + // resolve host and ports + bb, err := ioutil.ReadFile(bootCfgPath) + if err != nil { + return err + } + + bootContent := strings. 
+ NewReplacer( + "arana-mysql", host, + "13306", strconv.Itoa(ms.port), // arana port + "3306", strconv.Itoa(port), // mysql port + ). + Replace(string(bb)) + + // clone temp config file + bf, err := ioutil.TempFile("", "arana-boot.*.yaml") + if err != nil { + return err + } + defer func() { + _ = bf.Close() + }() + if _, err = bf.WriteString(bootContent); err != nil { + return err + } + + ms.tmpBootFile = bf.Name() + ms.T().Logf("====== generate temp arana bootstrap config: %s ======\n", ms.tmpBootFile) ms.tmpFile = f.Name() ms.T().Logf("====== generate temp arana config: %s ======\n", ms.tmpFile) diff --git a/testdata/fake_bootstrap.yaml b/testdata/fake_bootstrap.yaml index 6523bdc6..8b1752ff 100644 --- a/testdata/fake_bootstrap.yaml +++ b/testdata/fake_bootstrap.yaml @@ -15,66 +15,28 @@ # limitations under the License. # +kind: ConfigMap +apiVersion: "1.0" +listeners: + - protocol_type: mysql + server_version: 5.7.0 + socket_address: + address: 0.0.0.0 + port: 13306 config: name: file options: - content: |- - kind: ConfigMap - apiVersion: "1.0" - metadata: - name: arana-config - data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - - tenants: - - name: arana - users: - - username: arana - password: "123456" - - clusters: - - name: employee - type: mysql - sql_max_limit: -1 - tenant: arana - parameters: - max_allowed_packet: 256M - groups: - - name: employee_0000 - nodes: - - name: node_1 - host: 127.0.0.1 - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - parameters: - - sharding_rule: - tables: - - name: employee.student - allow_full_scan: true - db_rules: - - column: student_id - type: modShard - expr: modShard(3) - tbl_rules: - - column: student_id - type: modShard - expr: modShard(8) - topology: - db_pattern: employee_0000 - tbl_pattern: student_${0000...0007} - attributes: - sqlMaxLimit: -1 - foo: bar # name: etcd # options: - # endpoints: "http://localhost:2382" \ No newline at end of file + # endpoints: "http://localhost:2379" + + # name: nacos + # options: + # endpoints: "localhost:8080" + # namespace: arana + # group: arana + # contextPath: /nacos + # scheme: http + # username: nacos + # password: nacos diff --git a/testdata/fake_config.yaml b/testdata/fake_config.yaml index 2bad53c6..eade8b61 100644 --- a/testdata/fake_config.yaml +++ b/testdata/fake_config.yaml @@ -20,96 +20,99 @@ apiVersion: "1.0" metadata: name: arana-config data: - listeners: - - protocol_type: mysql - server_version: 5.7.0 - socket_address: - address: 0.0.0.0 - port: 13306 - tenants: - name: arana users: + - username: root + password: "123456" - username: arana password: "123456" - - username: dksl + clusters: + - name: employees + type: mysql + sql_max_limit: -1 + tenant: arana + parameters: + max_allowed_packet: 256M + groups: + - name: employees_0000 + nodes: + - node0 + - node0_r_0 + - name: employees_0001 + nodes: + - node1 + - name: employees_0002 + nodes: + - node2 + - name: employees_0003 + nodes: + - node3 + sharding_rule: + tables: + - name: employees.student + allow_full_scan: true + sequence: + type: snowflake + option: + db_rules: + - column: uid + type: scriptExpr + expr: parseInt($value % 32 / 8) + tbl_rules: + - column: uid + type: scriptExpr + expr: $value % 32 + step: 32 + topology: + db_pattern: employees_${0000..0003} + tbl_pattern: student_${0000..0031} + attributes: + sqlMaxLimit: -1 + nodes: + node0: + name: node0 + host: arana-mysql + port: 3306 + username: root + 
password: "123456" + database: employees_0000 + weight: r10w10 + parameters: + node0_r_0: + name: node0_r_0 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0000_r + weight: r0w0 + parameters: + node1: + name: node1 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0001 + weight: r10w10 + parameters: + node2: + name: node2 + host: arana-mysql + port: 3306 + username: root + password: "123456" + database: employees_0002 + weight: r10w10 + parameters: + node3: + name: node3 + host: arana-mysql + port: 3306 + username: root password: "123456" + database: employees_0003 + weight: r10w10 + parameters: - clusters: - - name: employees - type: mysql - sql_max_limit: -1 - tenant: arana - parameters: - max_allowed_packet: 256M - groups: - - name: employees_0000 - nodes: - - name: node0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000 - weight: r10w10 - parameters: - - name: node0_r_0 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0000_r - weight: r0w0 - parameters: - - name: employees_0001 - nodes: - - name: node1 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0001 - weight: r10w10 - parameters: - - name: employees_0002 - nodes: - - name: node2 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0002 - weight: r10w10 - parameters: - - name: employees_0003 - nodes: - - name: node3 - host: arana-mysql - port: 3306 - username: root - password: "123456" - database: employees_0003 - weight: r10w10 - parameters: - sharding_rule: - tables: - - name: employees.student - allow_full_scan: true - sequence: - type: snowflake - option: - db_rules: - - column: uid - type: scriptExpr - expr: parseInt($value % 32 / 8) - tbl_rules: - - column: uid - type: scriptExpr - expr: $value % 32 - step: 32 - topology: - db_pattern: employees_${0000..0003} - tbl_pattern: student_${0000..0031} - attributes: - sqlMaxLimit: -1 - foo: bar